aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2013-12-16 08:51:32 -0500
committerIngo Molnar <mingo@kernel.org>2013-12-16 08:51:32 -0500
commitfe361cfcf40ad4612226347573a8669cd0d44799 (patch)
treee874ef5a29c3bfe3dd67dc2d8962562c00fc8e3a
parentc7f2e3cd6c1f4932ccc4135d050eae3f7c7aef63 (diff)
parent319e2e3f63c348a9b66db4667efa73178e18b17d (diff)
Merge tag 'v3.13-rc4' into perf/core
Merge Linux 3.13-rc4, to refresh this branch with the latest fixes. Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--CREDITS5
-rw-r--r--Documentation/Changes11
-rw-r--r--Documentation/DocBook/device-drivers.tmpl2
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-expbuf.xml8
-rw-r--r--Documentation/assoc_array.txt574
-rw-r--r--Documentation/device-mapper/cache.txt10
-rw-r--r--Documentation/devicetree/bindings/arc/pmu.txt24
-rw-r--r--Documentation/devicetree/bindings/arm/omap/mpu.txt8
-rw-r--r--Documentation/devicetree/bindings/arm/pmu.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/exynos4-clock.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/exynos5250-clock.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/exynos5420-clock.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/exynos5440-clock.txt2
-rw-r--r--Documentation/devicetree/bindings/dma/atmel-dma.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/8xxx_gpio.txt66
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-omap.txt3
-rw-r--r--Documentation/devicetree/bindings/i2c/trivial-devices.txt4
-rw-r--r--Documentation/devicetree/bindings/mmc/ti-omap.txt54
-rw-r--r--Documentation/devicetree/bindings/net/davinci_emac.txt2
-rw-r--r--Documentation/devicetree/bindings/net/fsl-fec.txt2
-rw-r--r--Documentation/devicetree/bindings/net/smsc-lan91c111.txt4
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/dma.txt138
-rw-r--r--Documentation/devicetree/bindings/rng/qcom,prng.txt17
-rw-r--r--Documentation/devicetree/bindings/spi/nvidia,tegra20-spi.txt5
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt2
-rw-r--r--Documentation/dmatest.txt72
-rw-r--r--Documentation/filesystems/btrfs.txt34
-rw-r--r--Documentation/gpio/00-INDEX14
-rw-r--r--Documentation/gpio/board.txt115
-rw-r--r--Documentation/gpio/consumer.txt197
-rw-r--r--Documentation/gpio/driver.txt75
-rw-r--r--Documentation/gpio/gpio-legacy.txt (renamed from Documentation/gpio.txt)0
-rw-r--r--Documentation/gpio/gpio.txt119
-rw-r--r--Documentation/gpio/sysfs.txt155
-rw-r--r--Documentation/kernel-parameters.txt11
-rw-r--r--Documentation/mic/mpssd/mpssd.c18
-rw-r--r--Documentation/networking/ip-sysctl.txt3
-rw-r--r--Documentation/networking/packet_mmap.txt10
-rw-r--r--Documentation/power/runtime_pm.txt14
-rw-r--r--Documentation/security/00-INDEX2
-rw-r--r--Documentation/security/IMA-templates.txt87
-rw-r--r--Documentation/security/keys.txt20
-rwxr-xr-xDocumentation/target/tcm_mod_builder.py18
-rw-r--r--Documentation/vm/split_page_table_lock6
-rw-r--r--MAINTAINERS97
-rw-r--r--Makefile4
-rw-r--r--arch/alpha/Kconfig76
-rw-r--r--arch/alpha/include/asm/machvec.h22
-rw-r--r--arch/alpha/include/asm/pal.h71
-rw-r--r--arch/alpha/include/asm/rtc.h11
-rw-r--r--arch/alpha/include/asm/string.h24
-rw-r--r--arch/alpha/include/asm/thread_info.h2
-rw-r--r--arch/alpha/include/uapi/asm/pal.h1
-rw-r--r--arch/alpha/kernel/Makefile1
-rw-r--r--arch/alpha/kernel/alpha_ksyms.c1
-rw-r--r--arch/alpha/kernel/irq_alpha.c16
-rw-r--r--arch/alpha/kernel/machvec_impl.h5
-rw-r--r--arch/alpha/kernel/perf_event.c15
-rw-r--r--arch/alpha/kernel/process.c17
-rw-r--r--arch/alpha/kernel/proto.h6
-rw-r--r--arch/alpha/kernel/rtc.c323
-rw-r--r--arch/alpha/kernel/setup.c23
-rw-r--r--arch/alpha/kernel/smp.c33
-rw-r--r--arch/alpha/kernel/sys_jensen.c2
-rw-r--r--arch/alpha/kernel/sys_marvel.c55
-rw-r--r--arch/alpha/kernel/time.c405
-rw-r--r--arch/alpha/kernel/traps.c15
-rw-r--r--arch/alpha/lib/csum_partial_copy.c10
-rw-r--r--arch/alpha/lib/ev6-memset.S12
-rw-r--r--arch/alpha/lib/memset.S11
-rw-r--r--arch/arc/Kconfig1
-rw-r--r--arch/arc/boot/dts/abilis_tb100.dtsi172
-rw-r--r--arch/arc/boot/dts/abilis_tb100_dvk.dts24
-rw-r--r--arch/arc/boot/dts/abilis_tb101.dtsi178
-rw-r--r--arch/arc/boot/dts/abilis_tb101_dvk.dts24
-rw-r--r--arch/arc/boot/dts/abilis_tb10x.dtsi3
-rw-r--r--arch/arc/boot/dts/angel4.dts4
-rw-r--r--arch/arc/configs/fpga_noramfs_defconfig64
-rw-r--r--arch/arc/include/asm/perf_event.h204
-rw-r--r--arch/arc/include/asm/thread_info.h2
-rw-r--r--arch/arc/include/uapi/asm/unistd.h5
-rw-r--r--arch/arc/kernel/Makefile1
-rw-r--r--arch/arc/kernel/perf_event.c326
-rw-r--r--arch/arc/plat-tb10x/Kconfig2
-rw-r--r--arch/arm/Kconfig11
-rw-r--r--arch/arm/boot/dts/am335x-base0033.dts79
-rw-r--r--arch/arm/boot/dts/am335x-igep0033.dtsi29
-rw-r--r--arch/arm/boot/dts/am3517-evm.dts6
-rw-r--r--arch/arm/boot/dts/am3517.dtsi63
-rw-r--r--arch/arm/boot/dts/armada-370-db.dts28
-rw-r--r--arch/arm/boot/dts/armada-370-xp.dtsi2
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78230.dtsi24
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78260.dtsi109
-rw-r--r--arch/arm/boot/dts/at91sam9x5_usart3.dtsi4
-rw-r--r--arch/arm/boot/dts/bcm2835.dtsi4
-rw-r--r--arch/arm/boot/dts/cros5250-common.dtsi12
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi2
-rw-r--r--arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi4
-rw-r--r--arch/arm/boot/dts/omap-zoom-common.dtsi2
-rw-r--r--arch/arm/boot/dts/omap2.dtsi96
-rw-r--r--arch/arm/boot/dts/omap2420.dtsi23
-rw-r--r--arch/arm/boot/dts/omap2430.dtsi49
-rw-r--r--arch/arm/boot/dts/omap3-beagle-xm.dts7
-rw-r--r--arch/arm/boot/dts/omap3-beagle.dts21
-rw-r--r--arch/arm/boot/dts/omap3-igep.dtsi85
-rw-r--r--arch/arm/boot/dts/omap3-igep0020.dts50
-rw-r--r--arch/arm/boot/dts/omap3-igep0030.dts4
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts25
-rw-r--r--arch/arm/boot/dts/omap3-n950-n9.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3.dtsi42
-rw-r--r--arch/arm/boot/dts/omap34xx-hs.dtsi16
-rw-r--r--arch/arm/boot/dts/omap36xx-hs.dtsi16
-rw-r--r--arch/arm/boot/dts/omap4-panda-common.dtsi20
-rw-r--r--arch/arm/boot/dts/omap4-sdp.dts12
-rw-r--r--arch/arm/boot/dts/socfpga.dtsi7
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi27
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi42
-rw-r--r--arch/arm/common/edma.c4
-rw-r--r--arch/arm/configs/multi_v7_defconfig3
-rw-r--r--arch/arm/configs/omap2plus_defconfig1
-rw-r--r--arch/arm/configs/sunxi_defconfig7
-rw-r--r--arch/arm/configs/u8500_defconfig3
-rw-r--r--arch/arm/include/asm/hardware/iop3xx-adma.h30
-rw-r--r--arch/arm/include/asm/hardware/iop_adma.h4
-rw-r--r--arch/arm/include/asm/memory.h40
-rw-r--r--arch/arm/include/asm/pgtable.h2
-rw-r--r--arch/arm/include/asm/thread_info.h6
-rw-r--r--arch/arm/kernel/head-nommu.S4
-rw-r--r--arch/arm/kernel/head.S9
-rw-r--r--arch/arm/kernel/machine_kexec.c17
-rw-r--r--arch/arm/kernel/process.c7
-rw-r--r--arch/arm/kernel/relocate_kernel.S8
-rw-r--r--arch/arm/kernel/setup.c3
-rw-r--r--arch/arm/kernel/sigreturn_codes.S40
-rw-r--r--arch/arm/kernel/stacktrace.c2
-rw-r--r--arch/arm/kernel/traps.c5
-rw-r--r--arch/arm/kvm/mmu.c34
-rw-r--r--arch/arm/lib/bitops.h2
-rw-r--r--arch/arm/lib/delay-loop.S1
-rw-r--r--arch/arm/mach-at91/at91rm9200_time.c7
-rw-r--r--arch/arm/mach-at91/pm.h4
-rw-r--r--arch/arm/mach-at91/sama5d3.c6
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c4
-rw-r--r--arch/arm/mach-davinci/dm355.c3
-rw-r--r--arch/arm/mach-davinci/dm365.c3
-rw-r--r--arch/arm/mach-davinci/dm644x.c3
-rw-r--r--arch/arm/mach-davinci/dm646x.c6
-rw-r--r--arch/arm/mach-footbridge/common.c3
-rw-r--r--arch/arm/mach-footbridge/dc21285.c2
-rw-r--r--arch/arm/mach-footbridge/ebsa285.c22
-rw-r--r--arch/arm/mach-highbank/highbank.c23
-rw-r--r--arch/arm/mach-iop13xx/include/mach/adma.h26
-rw-r--r--arch/arm/mach-omap2/Makefile6
-rw-r--r--arch/arm/mach-omap2/board-generic.c18
-rw-r--r--arch/arm/mach-omap2/common.h1
-rw-r--r--arch/arm/mach-omap2/display.c78
-rw-r--r--arch/arm/mach-omap2/dss-common.c2
-rw-r--r--arch/arm/mach-omap2/gpmc.c58
-rw-r--r--arch/arm/mach-omap2/omap-secure.h7
-rw-r--r--arch/arm/mach-omap2/omap4-common.c57
-rw-r--r--arch/arm/mach-omap2/omap_device.c24
-rw-r--r--arch/arm/mach-omap2/omap_device.h1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c143
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c13
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c12
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_54xx_data.c13
-rw-r--r--arch/arm/mach-omap2/pdata-quirks.c1
-rw-r--r--arch/arm/mach-omap2/pm34xx.c2
-rw-r--r--arch/arm/mach-omap2/powerdomain.c3
-rw-r--r--arch/arm/mach-omap2/prm44xx_54xx.h2
-rw-r--r--arch/arm/mach-pxa/reset.c8
-rw-r--r--arch/arm/mach-pxa/tosa.c102
-rw-r--r--arch/arm/mach-socfpga/Kconfig1
-rw-r--r--arch/arm/mach-tegra/fuse.c12
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c4
-rw-r--r--arch/arm/mach-vexpress/spc.c40
-rw-r--r--arch/arm/mach-vexpress/spc.h1
-rw-r--r--arch/arm/mach-vexpress/tc2_pm.c66
-rw-r--r--arch/arm/mm/dma-mapping.c88
-rw-r--r--arch/arm/mm/init.c2
-rw-r--r--arch/arm/mm/mmap.c2
-rw-r--r--arch/arm/mm/mmu.c4
-rw-r--r--arch/arm/mm/nommu.c1
-rw-r--r--arch/arm/mm/pgd.c3
-rw-r--r--arch/arm/mm/proc-v7.S17
-rw-r--r--arch/arm/plat-omap/include/plat/dmtimer.h5
-rw-r--r--arch/arm/xen/p2m.c5
-rw-r--r--arch/arm64/Kconfig3
-rw-r--r--arch/arm64/boot/dts/foundation-v8.dts2
-rw-r--r--arch/arm64/include/asm/io.h2
-rw-r--r--arch/arm64/include/asm/irqflags.h3
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h2
-rw-r--r--arch/arm64/include/asm/pgtable.h33
-rw-r--r--arch/arm64/include/asm/thread_info.h6
-rw-r--r--arch/arm64/kernel/debug-monitors.c20
-rw-r--r--arch/arm64/kernel/entry.S29
-rw-r--r--arch/arm64/kernel/head.S3
-rw-r--r--arch/arm64/kernel/ptrace.c40
-rw-r--r--arch/arm64/kernel/setup.c5
-rw-r--r--arch/arm64/kernel/smp.c1
-rw-r--r--arch/arm64/mm/proc.S2
-rw-r--r--arch/avr32/boards/favr-32/setup.c4
-rw-r--r--arch/avr32/boot/u-boot/head.S35
-rw-r--r--arch/avr32/configs/atngw100_defconfig1
-rw-r--r--arch/avr32/configs/atngw100_evklcd100_defconfig1
-rw-r--r--arch/avr32/configs/atngw100_evklcd101_defconfig1
-rw-r--r--arch/avr32/configs/atngw100_mrmt_defconfig1
-rw-r--r--arch/avr32/configs/atngw100mkii_defconfig1
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd100_defconfig1
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd101_defconfig1
-rw-r--r--arch/avr32/configs/atstk1002_defconfig1
-rw-r--r--arch/avr32/configs/atstk1003_defconfig1
-rw-r--r--arch/avr32/configs/atstk1004_defconfig1
-rw-r--r--arch/avr32/configs/atstk1006_defconfig1
-rw-r--r--arch/avr32/configs/favr-32_defconfig1
-rw-r--r--arch/avr32/configs/hammerhead_defconfig1
-rw-r--r--arch/avr32/configs/merisc_defconfig1
-rw-r--r--arch/avr32/configs/mimc200_defconfig1
-rw-r--r--arch/avr32/include/asm/kprobes.h14
-rw-r--r--arch/avr32/include/asm/thread_info.h2
-rw-r--r--arch/avr32/include/uapi/asm/Kbuild24
-rw-r--r--arch/avr32/include/uapi/asm/auxvec.h6
-rw-r--r--arch/avr32/include/uapi/asm/bitsperlong.h1
-rw-r--r--arch/avr32/include/uapi/asm/byteorder.h6
-rw-r--r--arch/avr32/include/uapi/asm/cachectl.h6
-rw-r--r--arch/avr32/include/uapi/asm/errno.h6
-rw-r--r--arch/avr32/include/uapi/asm/fcntl.h6
-rw-r--r--arch/avr32/include/uapi/asm/ioctl.h6
-rw-r--r--arch/avr32/include/uapi/asm/ioctls.h6
-rw-r--r--arch/avr32/include/uapi/asm/ipcbuf.h1
-rw-r--r--arch/avr32/include/uapi/asm/kvm_para.h1
-rw-r--r--arch/avr32/include/uapi/asm/mman.h1
-rw-r--r--arch/avr32/include/uapi/asm/msgbuf.h6
-rw-r--r--arch/avr32/include/uapi/asm/poll.h1
-rw-r--r--arch/avr32/include/uapi/asm/posix_types.h6
-rw-r--r--arch/avr32/include/uapi/asm/resource.h6
-rw-r--r--arch/avr32/include/uapi/asm/sembuf.h6
-rw-r--r--arch/avr32/include/uapi/asm/setup.h1
-rw-r--r--arch/avr32/include/uapi/asm/shmbuf.h6
-rw-r--r--arch/avr32/include/uapi/asm/sigcontext.h6
-rw-r--r--arch/avr32/include/uapi/asm/siginfo.h6
-rw-r--r--arch/avr32/include/uapi/asm/signal.h1
-rw-r--r--arch/avr32/include/uapi/asm/socket.h6
-rw-r--r--arch/avr32/include/uapi/asm/sockios.h6
-rw-r--r--arch/avr32/include/uapi/asm/stat.h6
-rw-r--r--arch/avr32/include/uapi/asm/statfs.h6
-rw-r--r--arch/avr32/include/uapi/asm/swab.h6
-rw-r--r--arch/avr32/include/uapi/asm/termbits.h6
-rw-r--r--arch/avr32/include/uapi/asm/termios.h1
-rw-r--r--arch/avr32/include/uapi/asm/types.h5
-rw-r--r--arch/avr32/include/uapi/asm/unistd.h1
-rw-r--r--arch/avr32/kernel/entry-avr32b.S3
-rw-r--r--arch/avr32/kernel/head.S20
-rw-r--r--arch/avr32/kernel/time.c2
-rw-r--r--arch/avr32/mach-at32ap/pm.c2
-rw-r--r--arch/blackfin/include/asm/hardirq.h3
-rw-r--r--arch/blackfin/include/asm/thread_info.h2
-rw-r--r--arch/c6x/include/asm/thread_info.h2
-rw-r--r--arch/cris/include/asm/hardirq.h12
-rw-r--r--arch/cris/include/asm/thread_info.h2
-rw-r--r--arch/frv/include/asm/thread_info.h2
-rw-r--r--arch/hexagon/include/asm/thread_info.h4
-rw-r--r--arch/ia64/hp/common/sba_iommu.c2
-rw-r--r--arch/ia64/include/asm/pci.h2
-rw-r--r--arch/ia64/include/asm/thread_info.h3
-rw-r--r--arch/ia64/kernel/entry.S15
-rw-r--r--arch/ia64/kernel/perfmon.c8
-rw-r--r--arch/ia64/pci/pci.c6
-rw-r--r--arch/ia64/sn/kernel/io_acpi_init.c4
-rw-r--r--arch/m32r/include/asm/hardirq.h16
-rw-r--r--arch/m32r/include/asm/thread_info.h2
-rw-r--r--arch/m32r/kernel/entry.S8
-rw-r--r--arch/m68k/include/asm/hardirq.h11
-rw-r--r--arch/m68k/include/asm/thread_info.h2
-rw-r--r--arch/m68k/kernel/entry.S40
-rw-r--r--arch/m68k/kernel/ints.c6
-rw-r--r--arch/m68k/platform/68000/entry.S33
-rw-r--r--arch/m68k/platform/68360/entry.S24
-rw-r--r--arch/metag/include/asm/thread_info.h2
-rw-r--r--arch/microblaze/include/asm/thread_info.h2
-rw-r--r--arch/mips/include/asm/thread_info.h2
-rw-r--r--arch/mn10300/include/asm/thread_info.h2
-rw-r--r--arch/parisc/configs/c3000_defconfig2
-rw-r--r--arch/parisc/configs/c8000_defconfig36
-rw-r--r--arch/parisc/configs/generic-64bit_defconfig39
-rw-r--r--arch/parisc/include/asm/serial.h2
-rw-r--r--arch/parisc/include/asm/socket.h11
-rw-r--r--arch/parisc/include/asm/thread_info.h3
-rw-r--r--arch/parisc/include/asm/uaccess.h46
-rw-r--r--arch/parisc/include/uapi/asm/socket.h11
-rw-r--r--arch/parisc/kernel/hardware.c7
-rw-r--r--arch/parisc/kernel/head.S6
-rw-r--r--arch/parisc/kernel/sys_parisc.c25
-rw-r--r--arch/parisc/kernel/unwind.c9
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S138
-rw-r--r--arch/parisc/lib/memcpy.c6
-rw-r--r--arch/parisc/mm/fault.c22
-rw-r--r--arch/parisc/mm/init.c19
-rw-r--r--arch/powerpc/Makefile8
-rw-r--r--arch/powerpc/boot/dts/fsl/b4si-post.dtsi4
-rw-r--r--arch/powerpc/boot/dts/fsl/elo3-dma-0.dtsi82
-rw-r--r--arch/powerpc/boot/dts/fsl/elo3-dma-1.dtsi82
-rw-r--r--arch/powerpc/boot/dts/fsl/t4240si-post.dtsi4
-rw-r--r--arch/powerpc/boot/dts/mpc5121.dtsi1
-rw-r--r--arch/powerpc/boot/dts/xcalibur1501.dts4
-rw-r--r--arch/powerpc/boot/dts/xpedite5301.dts4
-rw-r--r--arch/powerpc/boot/dts/xpedite5330.dts4
-rw-r--r--arch/powerpc/boot/dts/xpedite5370.dts4
-rw-r--r--arch/powerpc/boot/util.S14
-rw-r--r--arch/powerpc/configs/52xx/cm5200_defconfig3
-rw-r--r--arch/powerpc/configs/52xx/lite5200b_defconfig3
-rw-r--r--arch/powerpc/configs/52xx/motionpro_defconfig3
-rw-r--r--arch/powerpc/configs/52xx/pcm030_defconfig3
-rw-r--r--arch/powerpc/configs/52xx/tqm5200_defconfig3
-rw-r--r--arch/powerpc/configs/mpc5200_defconfig3
-rw-r--r--arch/powerpc/configs/pasemi_defconfig7
-rw-r--r--arch/powerpc/configs/pseries_le_defconfig352
-rw-r--r--arch/powerpc/include/asm/elf.h4
-rw-r--r--arch/powerpc/include/asm/hvcall.h2
-rw-r--r--arch/powerpc/include/asm/pgalloc-32.h6
-rw-r--r--arch/powerpc/include/asm/pgalloc-64.h7
-rw-r--r--arch/powerpc/include/asm/plpar_wrappers.h26
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h2
-rw-r--r--arch/powerpc/include/asm/reg.h7
-rw-r--r--arch/powerpc/include/asm/smp.h2
-rw-r--r--arch/powerpc/include/asm/thread_info.h11
-rw-r--r--arch/powerpc/include/asm/timex.h8
-rw-r--r--arch/powerpc/kernel/eeh.c9
-rw-r--r--arch/powerpc/kernel/eeh_event.c9
-rw-r--r--arch/powerpc/kernel/machine_kexec.c14
-rw-r--r--arch/powerpc/kernel/misc_64.S5
-rw-r--r--arch/powerpc/kernel/nvram_64.c2
-rw-r--r--arch/powerpc/kernel/process.c71
-rw-r--r--arch/powerpc/kernel/prom.c20
-rw-r--r--arch/powerpc/kernel/signal_32.c6
-rw-r--r--arch/powerpc/kernel/signal_64.c31
-rw-r--r--arch/powerpc/kernel/smp.c16
-rw-r--r--arch/powerpc/kernel/time.c4
-rw-r--r--arch/powerpc/kernel/vdso32/gettimeofday.S6
-rw-r--r--arch/powerpc/kernel/vdso64/sigtramp.S16
-rw-r--r--arch/powerpc/kernel/vio.c2
-rw-r--r--arch/powerpc/mm/gup.c5
-rw-r--r--arch/powerpc/mm/hugetlbpage-book3e.c3
-rw-r--r--arch/powerpc/mm/slice.c2
-rw-r--r--arch/powerpc/mm/tlb_nohash.c2
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype25
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c1
-rw-r--r--arch/powerpc/platforms/powernv/rng.c1
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c21
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c17
-rw-r--r--arch/powerpc/platforms/pseries/rng.c1
-rw-r--r--arch/powerpc/platforms/pseries/setup.c42
-rw-r--r--arch/powerpc/platforms/wsp/chroma.c1
-rw-r--r--arch/powerpc/platforms/wsp/h8.c1
-rw-r--r--arch/powerpc/platforms/wsp/ics.c2
-rw-r--r--arch/powerpc/platforms/wsp/opb_pic.c2
-rw-r--r--arch/powerpc/platforms/wsp/psr2.c1
-rw-r--r--arch/powerpc/platforms/wsp/scom_wsp.c1
-rw-r--r--arch/powerpc/platforms/wsp/wsp.c1
-rw-r--r--arch/powerpc/sysdev/ppc4xx_ocm.c2
-rw-r--r--arch/s390/Kconfig8
-rw-r--r--arch/s390/boot/Makefile4
-rw-r--r--arch/s390/crypto/aes_s390.c50
-rw-r--r--arch/s390/include/asm/ctl_reg.h2
-rw-r--r--arch/s390/include/asm/eadm.h13
-rw-r--r--arch/s390/include/asm/hardirq.h2
-rw-r--r--arch/s390/include/asm/page.h38
-rw-r--r--arch/s390/include/asm/pci.h6
-rw-r--r--arch/s390/include/asm/sclp.h6
-rw-r--r--arch/s390/include/asm/setup.h3
-rw-r--r--arch/s390/include/asm/thread_info.h2
-rw-r--r--arch/s390/include/asm/vdso.h5
-rw-r--r--arch/s390/kernel/asm-offsets.c4
-rw-r--r--arch/s390/kernel/compat_signal.c2
-rw-r--r--arch/s390/kernel/crash_dump.c22
-rw-r--r--arch/s390/kernel/early.c2
-rw-r--r--arch/s390/kernel/pgm_check.S2
-rw-r--r--arch/s390/kernel/setup.c7
-rw-r--r--arch/s390/kernel/signal.c2
-rw-r--r--arch/s390/kernel/time.c46
-rw-r--r--arch/s390/kernel/vdso.c2
-rw-r--r--arch/s390/kernel/vdso32/clock_gettime.S31
-rw-r--r--arch/s390/kernel/vdso32/gettimeofday.S9
-rw-r--r--arch/s390/kernel/vdso64/clock_getres.S4
-rw-r--r--arch/s390/kernel/vdso64/clock_gettime.S24
-rw-r--r--arch/s390/kernel/vdso64/gettimeofday.S9
-rw-r--r--arch/s390/lib/uaccess_pt.c3
-rw-r--r--arch/s390/pci/pci.c202
-rw-r--r--arch/s390/pci/pci_clp.c8
-rw-r--r--arch/s390/pci/pci_event.c79
-rw-r--r--arch/score/include/asm/thread_info.h2
-rw-r--r--arch/sh/include/asm/thread_info.h2
-rw-r--r--arch/sh/kernel/entry-common.S6
-rw-r--r--arch/sparc/include/asm/hardirq_32.h1
-rw-r--r--arch/sparc/include/asm/hardirq_64.h2
-rw-r--r--arch/sparc/include/asm/thread_info_32.h2
-rw-r--r--arch/sparc/include/asm/thread_info_64.h2
-rw-r--r--arch/sparc/include/asm/tlbflush_64.h1
-rw-r--r--arch/sparc/kernel/rtrap_64.S6
-rw-r--r--arch/sparc/mm/init_64.c2
-rw-r--r--arch/tile/include/asm/hardirq.h2
-rw-r--r--arch/tile/include/asm/thread_info.h2
-rw-r--r--arch/um/Kconfig.char4
-rw-r--r--arch/um/Kconfig.common5
-rw-r--r--arch/um/Makefile20
-rw-r--r--arch/um/configs/i386_defconfig76
-rw-r--r--arch/um/configs/x86_64_defconfig75
-rw-r--r--arch/um/defconfig899
-rw-r--r--arch/um/drivers/mconsole_kern.c6
-rw-r--r--arch/um/include/asm/processor-generic.h2
-rw-r--r--arch/um/include/asm/thread_info.h2
-rw-r--r--arch/um/include/shared/as-layout.h3
-rw-r--r--arch/um/include/shared/os.h1
-rw-r--r--arch/um/kernel/process.c15
-rw-r--r--arch/um/kernel/sysrq.c102
-rw-r--r--arch/um/kernel/trap.c14
-rw-r--r--arch/um/kernel/um_arch.c2
-rw-r--r--arch/um/os-Linux/signal.c8
-rw-r--r--arch/unicore32/include/asm/thread_info.h6
-rw-r--r--arch/x86/Makefile8
-rw-r--r--arch/x86/boot/Makefile6
-rw-r--r--arch/x86/boot/compressed/Makefile1
-rw-r--r--arch/x86/crypto/Makefile3
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c2
-rw-r--r--arch/x86/crypto/camellia_aesni_avx2_glue.c2
-rw-r--r--arch/x86/crypto/camellia_aesni_avx_glue.c2
-rw-r--r--arch/x86/crypto/cast5_avx_glue.c2
-rw-r--r--arch/x86/crypto/cast6_avx_glue.c2
-rw-r--r--arch/x86/crypto/serpent_avx2_glue.c2
-rw-r--r--arch/x86/crypto/serpent_avx_glue.c2
-rw-r--r--arch/x86/crypto/serpent_sse2_glue.c2
-rw-r--r--arch/x86/crypto/sha256_ssse3_glue.c4
-rw-r--r--arch/x86/crypto/twofish_avx_glue.c2
-rw-r--r--arch/x86/include/asm/atomic.h4
-rw-r--r--arch/x86/include/asm/atomic64_64.h4
-rw-r--r--arch/x86/include/asm/bitops.h6
-rw-r--r--arch/x86/include/asm/local.h4
-rw-r--r--arch/x86/include/asm/pci.h2
-rw-r--r--arch/x86/include/asm/rmwcc.h8
-rw-r--r--arch/x86/include/asm/simd.h11
-rw-r--r--arch/x86/include/asm/thread_info.h2
-rw-r--r--arch/x86/include/uapi/asm/msr-index.h2
-rw-r--r--arch/x86/kernel/apic/apic.c1
-rw-r--r--arch/x86/kernel/early-quirks.c4
-rw-r--r--arch/x86/kernel/reboot.c11
-rw-r--r--arch/x86/kvm/lapic.c35
-rw-r--r--arch/x86/kvm/lapic.h4
-rw-r--r--arch/x86/kvm/mmu_audit.c2
-rw-r--r--arch/x86/kvm/x86.c40
-rw-r--r--arch/x86/mm/pgtable.c6
-rw-r--r--arch/x86/pci/acpi.c4
-rw-r--r--arch/x86/platform/efi/early_printk.c2
-rw-r--r--arch/x86/platform/efi/efi.c7
-rw-r--r--arch/x86/platform/uv/tlb_uv.c5
-rw-r--r--arch/x86/realmode/rm/Makefile3
-rw-r--r--arch/x86/um/Kconfig5
-rw-r--r--arch/x86/um/asm/processor_32.h5
-rw-r--r--arch/x86/um/asm/processor_64.h5
-rw-r--r--arch/x86/um/sysrq_32.c66
-rw-r--r--arch/x86/um/sysrq_64.c8
-rw-r--r--arch/x86/um/vdso/.gitignore2
-rw-r--r--arch/xtensa/include/asm/thread_info.h2
-rw-r--r--block/blk-cgroup.h8
-rw-r--r--block/blk-flush.c19
-rw-r--r--block/blk-mq.c30
-rw-r--r--block/partitions/efi.c5
-rw-r--r--crypto/Kconfig26
-rw-r--r--crypto/Makefile9
-rw-r--r--crypto/ablk_helper.c (renamed from arch/x86/crypto/ablk_helper.c)13
-rw-r--r--crypto/ablkcipher.c21
-rw-r--r--crypto/algif_hash.c5
-rw-r--r--crypto/algif_skcipher.c4
-rw-r--r--crypto/ansi_cprng.c4
-rw-r--r--crypto/asymmetric_keys/Kconfig4
-rw-r--r--crypto/asymmetric_keys/asymmetric_type.c1
-rw-r--r--crypto/asymmetric_keys/public_key.c66
-rw-r--r--crypto/asymmetric_keys/public_key.h6
-rw-r--r--crypto/asymmetric_keys/rsa.c19
-rw-r--r--crypto/asymmetric_keys/x509_cert_parser.c35
-rw-r--r--crypto/asymmetric_keys/x509_parser.h18
-rw-r--r--crypto/asymmetric_keys/x509_public_key.c157
-rw-r--r--crypto/async_tx/async_memcpy.c37
-rw-r--r--crypto/async_tx/async_pq.c174
-rw-r--r--crypto/async_tx/async_raid6_recov.c61
-rw-r--r--crypto/async_tx/async_tx.c4
-rw-r--r--crypto/async_tx/async_xor.c123
-rw-r--r--crypto/async_tx/raid6test.c10
-rw-r--r--crypto/authenc.c61
-rw-r--r--crypto/authencesn.c34
-rw-r--r--crypto/ccm.c7
-rw-r--r--crypto/gcm.c2
-rw-r--r--crypto/hash_info.c56
-rw-r--r--crypto/memneq.c138
-rw-r--r--crypto/tcrypt.c4
-rw-r--r--crypto/testmgr.c26
-rw-r--r--drivers/acpi/Kconfig11
-rw-r--r--drivers/acpi/ac.c15
-rw-r--r--drivers/acpi/acpi_lpss.c9
-rw-r--r--drivers/acpi/acpi_platform.c2
-rw-r--r--drivers/acpi/acpica/acresrc.h6
-rw-r--r--drivers/acpi/acpica/nsalloc.c18
-rw-r--r--drivers/acpi/acpica/nsutils.c18
-rw-r--r--drivers/acpi/acpica/rscalc.c9
-rw-r--r--drivers/acpi/acpica/rscreate.c36
-rw-r--r--drivers/acpi/acpica/rsutils.c2
-rw-r--r--drivers/acpi/acpica/utdebug.c31
-rw-r--r--drivers/acpi/blacklist.c35
-rw-r--r--drivers/acpi/device_pm.c14
-rw-r--r--drivers/acpi/ec.c3
-rw-r--r--drivers/acpi/event.c25
-rw-r--r--drivers/acpi/glue.c53
-rw-r--r--drivers/acpi/nvs.c1
-rw-r--r--drivers/acpi/pci_root.c4
-rw-r--r--drivers/acpi/scan.c16
-rw-r--r--drivers/acpi/sleep.c2
-rw-r--r--drivers/acpi/sysfs.c54
-rw-r--r--drivers/acpi/video.c87
-rw-r--r--drivers/ata/ahci.c2
-rw-r--r--drivers/ata/ahci_platform.c1
-rw-r--r--drivers/ata/libata-acpi.c4
-rw-r--r--drivers/ata/libata-core.c3
-rw-r--r--drivers/ata/libata-scsi.c1
-rw-r--r--drivers/ata/libata-zpodd.c4
-rw-r--r--drivers/ata/pata_arasan_cf.c4
-rw-r--r--drivers/atm/idt77252.c2
-rw-r--r--drivers/base/platform.c4
-rw-r--r--drivers/base/power/main.c3
-rw-r--r--drivers/base/regmap/regmap-mmio.c11
-rw-r--r--drivers/base/regmap/regmap.c8
-rw-r--r--drivers/block/null_blk.c16
-rw-r--r--drivers/block/virtio_blk.c5
-rw-r--r--drivers/block/xen-blkfront.c7
-rw-r--r--drivers/char/hw_random/Kconfig25
-rw-r--r--drivers/char/hw_random/Makefile2
-rw-r--r--drivers/char/hw_random/msm-rng.c197
-rw-r--r--drivers/char/hw_random/omap3-rom-rng.c141
-rw-r--r--drivers/char/hw_random/pseries-rng.c5
-rw-r--r--drivers/char/hw_random/via-rng.c2
-rw-r--r--drivers/char/i8k.c7
-rw-r--r--drivers/char/tpm/Kconfig37
-rw-r--r--drivers/char/tpm/Makefile11
-rw-r--r--drivers/char/tpm/tpm-interface.c (renamed from drivers/char/tpm/tpm.c)138
-rw-r--r--drivers/char/tpm/tpm.h3
-rw-r--r--drivers/char/tpm/tpm_atmel.c2
-rw-r--r--drivers/char/tpm/tpm_eventlog.c3
-rw-r--r--drivers/char/tpm/tpm_i2c_atmel.c284
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c4
-rw-r--r--drivers/char/tpm/tpm_i2c_nuvoton.c710
-rw-r--r--drivers/char/tpm/tpm_i2c_stm_st33.c12
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.c6
-rw-r--r--drivers/char/tpm/tpm_ppi.c4
-rw-r--r--drivers/char/tpm/tpm_tis.c2
-rw-r--r--drivers/char/tpm/xen-tpmfront.c2
-rw-r--r--drivers/clocksource/Kconfig1
-rw-r--r--drivers/clocksource/sh_mtu2.c16
-rw-r--r--drivers/clocksource/sh_tmu.c20
-rw-r--r--drivers/connector/cn_proc.c72
-rw-r--r--drivers/cpufreq/at32ap-cpufreq.c2
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c3
-rw-r--r--drivers/cpufreq/cpufreq_governor.c4
-rw-r--r--drivers/cpufreq/exynos4210-cpufreq.c1
-rw-r--r--drivers/cpufreq/exynos4x12-cpufreq.c1
-rw-r--r--drivers/cpufreq/exynos5250-cpufreq.c1
-rw-r--r--drivers/cpufreq/omap-cpufreq.c1
-rw-r--r--drivers/cpufreq/tegra-cpufreq.c4
-rw-r--r--drivers/cpuidle/cpuidle.c2
-rw-r--r--drivers/crypto/caam/Kconfig25
-rw-r--r--drivers/crypto/caam/Makefile4
-rw-r--r--drivers/crypto/caam/caamalg.c134
-rw-r--r--drivers/crypto/caam/caamhash.c88
-rw-r--r--drivers/crypto/caam/caamrng.c29
-rw-r--r--drivers/crypto/caam/ctrl.c418
-rw-r--r--drivers/crypto/caam/desc.h17
-rw-r--r--drivers/crypto/caam/intern.h20
-rw-r--r--drivers/crypto/caam/jr.c340
-rw-r--r--drivers/crypto/caam/jr.h5
-rw-r--r--drivers/crypto/caam/regs.h14
-rw-r--r--drivers/crypto/caam/sg_sw_sec4.h34
-rw-r--r--drivers/crypto/dcp.c49
-rw-r--r--drivers/crypto/ixp4xx_crypto.c26
-rw-r--r--drivers/crypto/mv_cesa.c14
-rw-r--r--drivers/crypto/omap-aes.c6
-rw-r--r--drivers/crypto/omap-sham.c1
-rw-r--r--drivers/crypto/picoxcell_crypto.c32
-rw-r--r--drivers/crypto/sahara.c2
-rw-r--r--drivers/crypto/talitos.c103
-rw-r--r--drivers/crypto/tegra-aes.c26
-rw-r--r--drivers/dma/Kconfig9
-rw-r--r--drivers/dma/amba-pl08x.c39
-rw-r--r--drivers/dma/at_hdmac.c28
-rw-r--r--drivers/dma/coh901318.c4
-rw-r--r--drivers/dma/cppi41.c178
-rw-r--r--drivers/dma/dma-jz4740.c2
-rw-r--r--drivers/dma/dmaengine.c264
-rw-r--r--drivers/dma/dmatest.c917
-rw-r--r--drivers/dma/dw/core.c29
-rw-r--r--drivers/dma/edma.c369
-rw-r--r--drivers/dma/ep93xx_dma.c30
-rw-r--r--drivers/dma/fsldma.c26
-rw-r--r--drivers/dma/fsldma.h2
-rw-r--r--drivers/dma/imx-dma.c42
-rw-r--r--drivers/dma/imx-sdma.c10
-rw-r--r--drivers/dma/intel_mid_dma.c4
-rw-r--r--drivers/dma/ioat/dma.c53
-rw-r--r--drivers/dma/ioat/dma.h14
-rw-r--r--drivers/dma/ioat/dma_v2.c2
-rw-r--r--drivers/dma/ioat/dma_v2.h1
-rw-r--r--drivers/dma/ioat/dma_v3.c323
-rw-r--r--drivers/dma/ioat/pci.c20
-rw-r--r--drivers/dma/iop-adma.c113
-rw-r--r--drivers/dma/ipu/ipu_idmac.c6
-rw-r--r--drivers/dma/k3dma.c4
-rw-r--r--drivers/dma/mmp_pdma.c8
-rw-r--r--drivers/dma/mmp_tdma.c40
-rw-r--r--drivers/dma/mv_xor.c58
-rw-r--r--drivers/dma/mv_xor.h25
-rw-r--r--drivers/dma/mxs-dma.c178
-rw-r--r--drivers/dma/omap-dma.c2
-rw-r--r--drivers/dma/pl330.c32
-rw-r--r--drivers/dma/ppc4xx/adma.c272
-rw-r--r--drivers/dma/s3c24xx-dma.c33
-rw-r--r--drivers/dma/sa11x0-dma.c2
-rw-r--r--drivers/dma/sh/rcar-hpbdma.c11
-rw-r--r--drivers/dma/sh/shdma-base.c2
-rw-r--r--drivers/dma/sh/shdmac.c4
-rw-r--r--drivers/dma/ste_dma40.c7
-rw-r--r--drivers/dma/tegra20-apb-dma.c6
-rw-r--r--drivers/dma/timb_dma.c37
-rw-r--r--drivers/dma/txx9dmac.c29
-rw-r--r--drivers/edac/sb_edac.c2
-rw-r--r--drivers/extcon/extcon-arizona.c4
-rw-r--r--drivers/extcon/extcon-class.c3
-rw-r--r--drivers/firewire/sbp2.c1
-rw-r--r--drivers/firmware/efi/efi-pstore.c163
-rw-r--r--drivers/firmware/efi/efivars.c12
-rw-r--r--drivers/firmware/efi/vars.c12
-rw-r--r--drivers/gpio/gpio-bcm-kona.c2
-rw-r--r--drivers/gpio/gpio-davinci.c4
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c8
-rw-r--r--drivers/gpio/gpio-msm-v2.c2
-rw-r--r--drivers/gpio/gpio-mvebu.c2
-rw-r--r--drivers/gpio/gpio-pl061.c10
-rw-r--r--drivers/gpio/gpio-rcar.c2
-rw-r--r--drivers/gpio/gpio-tb10x.c1
-rw-r--r--drivers/gpio/gpio-twl4030.c13
-rw-r--r--drivers/gpio/gpio-ucb1400.c1
-rw-r--r--drivers/gpio/gpiolib.c79
-rw-r--r--drivers/gpu/drm/drm_edid.c4
-rw-r--r--drivers/gpu/drm/drm_sysfs.c40
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c35
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c13
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c60
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c6
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1
-rw-r--r--drivers/gpu/drm/i915/intel_acpi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c7
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c25
-rw-r--r--drivers/gpu/drm/i915/intel_display.c47
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c36
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c19
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c8
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c26
-rw-r--r--drivers/gpu/drm/nouveau/Makefile1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/clock.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c445
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c1
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c1
-rw-r--r--drivers/gpu/drm/radeon/atombios_i2c.c17
-rw-r--r--drivers/gpu/drm/radeon/cik.c57
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c13
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c12
-rw-r--r--drivers/gpu/drm/radeon/evergreen_dma.c9
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c30
-rw-r--r--drivers/gpu/drm/radeon/r100.c3
-rw-r--r--drivers/gpu/drm/radeon/r600.c13
-rw-r--r--drivers/gpu/drm/radeon/r600_dma.c13
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon.h46
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h18
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c47
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c30
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c28
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c57
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c46
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c129
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h69
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/cayman4
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/evergreen4
-rw-r--r--drivers/gpu/drm/radeon/rv770_dma.c9
-rw-r--r--drivers/gpu/drm/radeon/si.c11
-rw-r--r--drivers/gpu/drm/radeon/si_dma.c9
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c4
-rw-r--r--drivers/gpu/drm/radeon/uvd_v3_1.c4
-rw-r--r--drivers/gpu/drm/tegra/drm.c34
-rw-r--r--drivers/gpu/drm/tegra/drm.h2
-rw-r--r--drivers/gpu/drm/tegra/fb.c2
-rw-r--r--drivers/gpu/drm/tegra/rgb.c11
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c35
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c7
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c26
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c32
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c254
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_prime.c137
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c165
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c30
-rw-r--r--drivers/gpu/host1x/bus.c5
-rw-r--r--drivers/gpu/host1x/hw/cdma_hw.c4
-rw-r--r--drivers/gpu/host1x/hw/debug_hw.c4
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/hid-appleir.c3
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-ids.h5
-rw-r--r--drivers/hid/hid-kye.c14
-rw-r--r--drivers/hid/hid-multitouch.c6
-rw-r--r--drivers/hid/hid-sensor-hub.c22
-rw-r--r--drivers/hid/hid-sony.c53
-rw-r--r--drivers/hid/hid-wiimote-core.c5
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c2
-rw-r--r--drivers/hid/uhid.c2
-rw-r--r--drivers/hwmon/Kconfig1
-rw-r--r--drivers/hwmon/acpi_power_meter.c5
-rw-r--r--drivers/hwmon/asus_atk0110.c1
-rw-r--r--drivers/hwmon/hih6130.c16
-rw-r--r--drivers/hwmon/lm75.c3
-rw-r--r--drivers/hwmon/lm78.c2
-rw-r--r--drivers/hwmon/lm90.c4
-rw-r--r--drivers/hwmon/nct6775.c91
-rw-r--r--drivers/hwmon/sis5595.c2
-rw-r--r--drivers/hwmon/vt8231.c2
-rw-r--r--drivers/hwmon/w83l786ng.c13
-rw-r--r--drivers/i2c/busses/i2c-bcm-kona.c3
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c1
-rw-r--r--drivers/i2c/busses/i2c-davinci.c4
-rw-r--r--drivers/i2c/busses/i2c-diolan-u2c.c16
-rw-r--r--drivers/i2c/busses/i2c-imx.c4
-rw-r--r--drivers/i2c/busses/i2c-omap.c30
-rw-r--r--drivers/i2c/i2c-core.c25
-rw-r--r--drivers/i2c/i2c-mux.c2
-rw-r--r--drivers/ide/ide-acpi.c5
-rw-r--r--drivers/idle/intel_idle.c24
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c5
-rw-r--r--drivers/iio/accel/kxsd9.c7
-rw-r--r--drivers/iio/adc/at91_adc.c1
-rw-r--r--drivers/iio/adc/mcp3422.c8
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c7
-rw-r--r--drivers/iio/common/hid-sensors/Kconfig9
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c29
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.h2
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c5
-rw-r--r--drivers/iio/light/Kconfig3
-rw-r--r--drivers/iio/light/hid-sensor-als.c5
-rw-r--r--drivers/iio/magnetometer/Kconfig2
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c5
-rw-r--r--drivers/iio/magnetometer/mag3110.c7
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c99
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h6
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c10
-rw-r--r--drivers/input/keyboard/adp5588-keys.c3
-rw-r--r--drivers/input/keyboard/adp5589-keys.c3
-rw-r--r--drivers/input/keyboard/bf54x-keys.c3
-rw-r--r--drivers/input/misc/adxl34x.c2
-rw-r--r--drivers/input/misc/hp_sdc_rtc.c5
-rw-r--r--drivers/input/misc/pcf8574_keypad.c7
-rw-r--r--drivers/input/mouse/alps.c206
-rw-r--r--drivers/input/mouse/alps.h1
-rw-r--r--drivers/input/mouse/elantech.c1
-rw-r--r--drivers/input/serio/serio.c24
-rw-r--r--drivers/input/touchscreen/Kconfig11
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/atmel-wm97xx.c2
-rw-r--r--drivers/input/touchscreen/cyttsp4_core.c3
-rw-r--r--drivers/input/touchscreen/sur40.c466
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c22
-rw-r--r--drivers/iommu/arm-smmu.c66
-rw-r--r--drivers/irqchip/irq-gic.c9
-rw-r--r--drivers/isdn/isdnloop/isdnloop.c8
-rw-r--r--drivers/isdn/mISDN/socket.c13
-rw-r--r--drivers/leds/leds-pwm.c53
-rw-r--r--drivers/macintosh/Makefile1
-rw-r--r--drivers/md/dm-bufio.c5
-rw-r--r--drivers/md/dm-cache-policy-mq.c13
-rw-r--r--drivers/md/dm-cache-target.c2
-rw-r--r--drivers/md/dm-delay.c23
-rw-r--r--drivers/md/dm-snap.c71
-rw-r--r--drivers/md/dm-stats.c1
-rw-r--r--drivers/md/dm-table.c5
-rw-r--r--drivers/md/dm-thin-metadata.c8
-rw-r--r--drivers/md/dm-thin-metadata.h1
-rw-r--r--drivers/md/dm-thin.c66
-rw-r--r--drivers/md/md.c147
-rw-r--r--drivers/md/persistent-data/dm-array.c10
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c6
-rw-r--r--drivers/md/persistent-data/dm-block-manager.h7
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c32
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c8
-rw-r--r--drivers/md/raid1.c162
-rw-r--r--drivers/md/raid1.h15
-rw-r--r--drivers/md/raid10.c6
-rw-r--r--drivers/md/raid5.c425
-rw-r--r--drivers/md/raid5.h16
-rw-r--r--drivers/media/common/siano/smscoreapi.h4
-rw-r--r--drivers/media/common/siano/smsdvb.h2
-rw-r--r--drivers/media/dvb-core/dvb_demux.c9
-rw-r--r--drivers/media/dvb-frontends/af9033.c12
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_c.c2
-rw-r--r--drivers/media/dvb-frontends/dib8000.c4
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c18
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c1
-rw-r--r--drivers/media/i2c/adv7183_regs.h6
-rw-r--r--drivers/media/i2c/adv7604.c2
-rw-r--r--drivers/media/i2c/adv7842.c2
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c2
-rw-r--r--drivers/media/i2c/m5mols/m5mols_controls.c2
-rw-r--r--drivers/media/i2c/mt9p031.c1
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c2
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3.h2
-rw-r--r--drivers/media/i2c/saa7115.c2
-rw-r--r--drivers/media/i2c/soc_camera/ov5642.c2
-rw-r--r--drivers/media/i2c/ths7303.c3
-rw-r--r--drivers/media/i2c/wm8775.c4
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c3
-rw-r--r--drivers/media/pci/cx18/cx18-driver.h2
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/pci/pluto2/pluto2.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c4
-rw-r--r--drivers/media/platform/coda.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c2
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c2
-rw-r--r--drivers/media/platform/m2m-deinterlace.c3
-rw-r--r--drivers/media/platform/marvell-ccic/mmp-driver.c46
-rw-r--r--drivers/media/platform/omap3isp/isp.c2
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c7
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc.h2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c12
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c2
-rw-r--r--drivers/media/platform/s5p-tv/mixer.h2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_video.c4
-rw-r--r--drivers/media/platform/soc_camera/omap1_camera.c2
-rw-r--r--drivers/media/platform/timblogiw.c2
-rw-r--r--drivers/media/platform/vivi.c4
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c4
-rw-r--r--drivers/media/radio/radio-shark.c4
-rw-r--r--drivers/media/radio/radio-shark2.c4
-rw-r--r--drivers/media/radio/radio-si476x.c4
-rw-r--r--drivers/media/radio/radio-tea5764.c2
-rw-r--r--drivers/media/radio/tef6862.c2
-rw-r--r--drivers/media/rc/imon.c2
-rw-r--r--drivers/media/rc/redrat3.c2
-rw-r--r--drivers/media/tuners/mt2063.c4
-rw-r--r--drivers/media/tuners/tuner-xc2028-types.h2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c17
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c4
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c2
-rw-r--r--drivers/media/usb/gspca/gl860/gl860.c2
-rw-r--r--drivers/media/usb/gspca/pac207.c2
-rw-r--r--drivers/media/usb/gspca/pac7302.c2
-rw-r--r--drivers/media/usb/gspca/stk1135.c3
-rw-r--r--drivers/media/usb/gspca/stv0680.c2
-rw-r--r--drivers/media/usb/gspca/sunplus.c1
-rw-r--r--drivers/media/usb/gspca/zc3xx.c2
-rw-r--r--drivers/media/usb/pwc/pwc-if.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv.c174
-rw-r--r--drivers/media/usb/uvc/uvc_video.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c29
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c4
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c3
-rw-r--r--drivers/mfd/Kconfig2
-rw-r--r--drivers/mfd/lpc_ich.c2
-rw-r--r--drivers/mfd/sec-core.c30
-rw-r--r--drivers/mfd/sec-irq.c6
-rw-r--r--drivers/mfd/ti-ssp.c2
-rw-r--r--drivers/misc/carma/carma-fpga.c3
-rw-r--r--drivers/misc/enclosure.c7
-rw-r--r--drivers/misc/mei/hw-me-regs.h5
-rw-r--r--drivers/misc/mei/pci-me.c4
-rw-r--r--drivers/misc/mic/card/mic_virtio.c33
-rw-r--r--drivers/misc/mic/card/mic_virtio.h7
-rw-r--r--drivers/misc/mic/host/mic_boot.c2
-rw-r--r--drivers/misc/mic/host/mic_virtio.c30
-rw-r--r--drivers/misc/mic/host/mic_x100.c4
-rw-r--r--drivers/mmc/core/sdio_bus.c3
-rw-r--r--drivers/mmc/host/omap.c45
-rw-r--r--drivers/mtd/nand/atmel_nand.c3
-rw-r--r--drivers/mtd/nand/fsmc_nand.c2
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c6
-rw-r--r--drivers/net/bonding/bond_main.c10
-rw-r--r--drivers/net/bonding/bond_options.c13
-rw-r--r--drivers/net/bonding/bond_sysfs.c34
-rw-r--r--drivers/net/bonding/bonding.h10
-rw-r--r--drivers/net/can/c_can/c_can.c22
-rw-r--r--drivers/net/can/flexcan.c2
-rw-r--r--drivers/net/can/sja1000/sja1000.c17
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c5
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h11
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c70
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h82
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c278
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c230
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c15
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h24
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c23
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h3
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c64
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c44
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h7
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c60
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c5
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h1
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c3
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c6
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c8
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c8
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c5
-rw-r--r--drivers/net/ethernet/realtek/r8169.c5
-rw-r--r--drivers/net/ethernet/sfc/efx.c8
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c39
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h2
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c78
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h3
-rw-r--r--drivers/net/ethernet/sfc/nic.h2
-rw-r--r--drivers/net/ethernet/sfc/ptp.c66
-rw-r--r--drivers/net/ethernet/sfc/rx.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c45
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c12
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw.c55
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c26
-rw-r--r--drivers/net/ethernet/via/via-velocity.c11
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c51
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c12
-rw-r--r--drivers/net/macvtap.c33
-rw-r--r--drivers/net/phy/micrel.c15
-rw-r--r--drivers/net/phy/phy_device.c4
-rw-r--r--drivers/net/phy/vitesse.c132
-rw-r--r--drivers/net/ppp/pppoe.c2
-rw-r--r--drivers/net/team/team.c33
-rw-r--r--drivers/net/tun.c28
-rw-r--r--drivers/net/usb/cdc_ncm.c2
-rw-r--r--drivers/net/usb/r8152.c114
-rw-r--r--drivers/net/usb/usbnet.c3
-rw-r--r--drivers/net/virtio_net.c185
-rw-r--r--drivers/net/vxlan.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c50
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h42
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h19
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c87
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c4
-rw-r--r--drivers/net/wireless/ath/regd.c3
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c28
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/bt-coex.c6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c7
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c21
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h8
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c7
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c3
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c6
-rw-r--r--drivers/net/wireless/libertas/debugfs.c6
-rw-r--r--drivers/net/wireless/libertas/if_cs.c1
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c21
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c23
-rw-r--r--drivers/net/wireless/mwifiex/fw.h4
-rw-r--r--drivers/net/wireless/mwifiex/ie.c11
-rw-r--r--drivers/net/wireless/mwifiex/main.c28
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c2
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c10
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c4
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c46
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c9
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c29
-rw-r--r--drivers/net/wireless/mwifiex/usb.c27
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c3
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c4
-rw-r--r--drivers/net/wireless/rtlwifi/base.c93
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/rf.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h6
-rw-r--r--drivers/net/xen-netback/interface.c24
-rw-r--r--drivers/net/xen-netback/netback.c267
-rw-r--r--drivers/net/xen-netfront.c7
-rw-r--r--drivers/ntb/ntb_hw.c121
-rw-r--r--drivers/ntb/ntb_hw.h7
-rw-r--r--drivers/ntb/ntb_regs.h16
-rw-r--r--drivers/ntb/ntb_transport.c163
-rw-r--r--drivers/pci/ats.c2
-rw-r--r--drivers/pci/host/pci-mvebu.c5
-rw-r--r--drivers/pci/host/pci-tegra.c12
-rw-r--r--drivers/pci/host/pcie-designware.c2
-rw-r--r--drivers/pci/hotplug/Kconfig4
-rw-r--r--drivers/pci/hotplug/Makefile2
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c2
-rw-r--r--drivers/pci/hotplug/acpiphp.h1
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c12
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c2
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c14
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c2
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c2
-rw-r--r--drivers/pci/hotplug/cpcihp_generic.c20
-rw-r--r--drivers/pci/hotplug/cpcihp_zt5550.c22
-rw-r--r--drivers/pci/hotplug/cpcihp_zt5550.h18
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c4
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c10
-rw-r--r--drivers/pci/hotplug/cpqphp_pci.c5
-rw-r--r--drivers/pci/hotplug/ibmphp.h12
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c109
-rw-r--r--drivers/pci/hotplug/ibmphp_ebda.c103
-rw-r--r--drivers/pci/hotplug/ibmphp_hpc.c24
-rw-r--r--drivers/pci/hotplug/ibmphp_pci.c71
-rw-r--r--drivers/pci/hotplug/ibmphp_res.c76
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c8
-rw-r--r--drivers/pci/hotplug/pciehp.h2
-rw-r--r--drivers/pci/hotplug/pciehp_acpi.c6
-rw-r--r--drivers/pci/hotplug/pciehp_core.c4
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c6
-rw-r--r--drivers/pci/hotplug/pcihp_skeleton.c8
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c2
-rw-r--r--drivers/pci/hotplug/rpaphp.h6
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c8
-rw-r--r--drivers/pci/hotplug/rpaphp_pci.c3
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c19
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c8
-rw-r--r--drivers/pci/hotplug/shpchp.h8
-rw-r--r--drivers/pci/hotplug/shpchp_core.c10
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c2
-rw-r--r--drivers/pci/ioapic.c2
-rw-r--r--drivers/pci/iov.c2
-rw-r--r--drivers/pci/irq.c2
-rw-r--r--drivers/pci/msi.c2
-rw-r--r--drivers/pci/pci-acpi.c8
-rw-r--r--drivers/pci/pci-driver.c54
-rw-r--r--drivers/pci/pci-label.c6
-rw-r--r--drivers/pci/pci-stub.c4
-rw-r--r--drivers/pci/pci-sysfs.c28
-rw-r--r--drivers/pci/pci.c54
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c2
-rw-r--r--drivers/pci/pcie/aspm.c2
-rw-r--r--drivers/pci/pcie/pme.c4
-rw-r--r--drivers/pci/pcie/portdrv.h2
-rw-r--r--drivers/pci/pcie/portdrv_bus.c4
-rw-r--r--drivers/pci/pcie/portdrv_core.c2
-rw-r--r--drivers/pci/pcie/portdrv_pci.c7
-rw-r--r--drivers/pci/probe.c10
-rw-r--r--drivers/pci/proc.c2
-rw-r--r--drivers/pci/quirks.c108
-rw-r--r--drivers/pci/remove.c6
-rw-r--r--drivers/pci/search.c12
-rw-r--r--drivers/pci/setup-bus.c18
-rw-r--r--drivers/pci/setup-res.c2
-rw-r--r--drivers/pci/slot.c2
-rw-r--r--drivers/pci/syscall.c2
-rw-r--r--drivers/pinctrl/pinctrl-abx500.c6
-rw-r--r--drivers/pinctrl/pinctrl-abx500.h2
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c5
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7740.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7372.c2
-rw-r--r--drivers/platform/Kconfig1
-rw-r--r--drivers/platform/Makefile1
-rw-r--r--drivers/platform/chrome/Kconfig28
-rw-r--r--drivers/platform/chrome/Makefile2
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c (renamed from drivers/platform/x86/chromeos_laptop.c)0
-rw-r--r--drivers/platform/x86/Kconfig11
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/apple-gmux.c2
-rw-r--r--drivers/platform/x86/asus-laptop.c5
-rw-r--r--drivers/platform/x86/dell-laptop.c288
-rw-r--r--drivers/platform/x86/dell-wmi.c7
-rw-r--r--drivers/platform/x86/eeepc-laptop.c4
-rw-r--r--drivers/platform/x86/hp-wmi.c14
-rw-r--r--drivers/platform/x86/ideapad-laptop.c4
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c4
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c117
-rw-r--r--drivers/platform/x86/panasonic-laptop.c5
-rw-r--r--drivers/platform/x86/sony-laptop.c47
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c8
-rw-r--r--drivers/platform/x86/topstar-laptop.c4
-rw-r--r--drivers/platform/x86/toshiba_acpi.c4
-rw-r--r--drivers/platform/x86/wmi.c6
-rw-r--r--drivers/pnp/driver.c12
-rw-r--r--drivers/pnp/pnpacpi/core.c10
-rw-r--r--drivers/powercap/powercap_sys.c7
-rw-r--r--drivers/regulator/arizona-micsupp.c54
-rw-r--r--drivers/regulator/as3722-regulator.c2
-rw-r--r--drivers/regulator/core.c14
-rw-r--r--drivers/regulator/gpio-regulator.c7
-rw-r--r--drivers/regulator/pfuze100-regulator.c14
-rw-r--r--drivers/regulator/s5m8767.c2
-rw-r--r--drivers/rtc/Kconfig10
-rw-r--r--drivers/rtc/rtc-at91rm9200.c11
-rw-r--r--drivers/rtc/rtc-s5m.c118
-rw-r--r--drivers/s390/block/dasd_eckd.c2
-rw-r--r--drivers/s390/block/dasd_genhd.c1
-rw-r--r--drivers/s390/block/scm_blk.c24
-rw-r--r--drivers/s390/block/scm_blk_cluster.c2
-rw-r--r--drivers/s390/char/Makefile3
-rw-r--r--drivers/s390/char/fs3270.c1
-rw-r--r--drivers/s390/char/sclp.h5
-rw-r--r--drivers/s390/char/sclp_cmd.c180
-rw-r--r--drivers/s390/char/sclp_early.c263
-rw-r--r--drivers/s390/char/sclp_sdias.c78
-rw-r--r--drivers/s390/char/sclp_sdias.h46
-rw-r--r--drivers/s390/char/zcore.c22
-rw-r--r--drivers/s390/cio/eadm_sch.c10
-rw-r--r--drivers/s390/cio/scm.c45
-rw-r--r--drivers/scsi/3w-9xxx.c3
-rw-r--r--drivers/scsi/3w-sas.c3
-rw-r--r--drivers/scsi/3w-xxxx.c3
-rw-r--r--drivers/scsi/aacraid/commctrl.c3
-rw-r--r--drivers/scsi/aacraid/linit.c1
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c1
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h1
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c14
-rw-r--r--drivers/scsi/bfa/bfad_attr.c7
-rw-r--r--drivers/scsi/gdth.c1
-rw-r--r--drivers/scsi/hosts.c1
-rw-r--r--drivers/scsi/hpsa.c5
-rw-r--r--drivers/scsi/ipr.c3
-rw-r--r--drivers/scsi/ips.c1
-rw-r--r--drivers/scsi/libsas/sas_ata.c2
-rw-r--r--drivers/scsi/megaraid.c1
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c1
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c1
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h4
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c91
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h9
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c2
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h2
-rw-r--r--drivers/scsi/pmcraid.c20
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c56
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h2
-rw-r--r--drivers/scsi/sd.c6
-rw-r--r--drivers/scsi/storvsc_drv.c1
-rw-r--r--drivers/spi/spi-bcm2835.c2
-rw-r--r--drivers/spi/spi-bcm63xx.c2
-rw-r--r--drivers/spi/spi-dw-mid.c4
-rw-r--r--drivers/spi/spi-mpc512x-psc.c2
-rw-r--r--drivers/spi/spi-mxs.c2
-rw-r--r--drivers/spi/spi-pxa2xx.c5
-rw-r--r--drivers/spi/spi-rspi.c3
-rw-r--r--drivers/spi/spi-ti-qspi.c23
-rw-r--r--drivers/spi/spi-txx9.c2
-rw-r--r--drivers/spi/spi.c21
-rw-r--r--drivers/staging/btmtk_usb/btmtk_usb.c3
-rw-r--r--drivers/staging/comedi/drivers/pcl730.c6
-rw-r--r--drivers/staging/comedi/drivers/s626.c2
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c2
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_download.c3
-rw-r--r--drivers/staging/iio/magnetometer/Kconfig2
-rw-r--r--drivers/staging/imx-drm/Makefile4
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c4
-rw-r--r--drivers/staging/media/go7007/go7007-usb.c28
-rw-r--r--drivers/staging/nvec/nvec.c3
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c3
-rw-r--r--drivers/staging/tidspbridge/Kconfig2
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c13
-rw-r--r--drivers/staging/vt6655/hostap.c3
-rw-r--r--drivers/staging/vt6656/baseband.c11
-rw-r--r--drivers/staging/vt6656/hostap.c3
-rw-r--r--drivers/staging/vt6656/rndis.h2
-rw-r--r--drivers/staging/zram/zram_drv.c19
-rw-r--r--drivers/staging/zsmalloc/zsmalloc-main.c17
-rw-r--r--drivers/target/iscsi/iscsi_target.c90
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c7
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c86
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h34
-rw-r--r--drivers/target/iscsi/iscsi_target_device.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c12
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c10
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.c5
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c22
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c42
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c20
-rw-r--r--drivers/target/loopback/tcm_loop.c242
-rw-r--r--drivers/target/loopback/tcm_loop.h6
-rw-r--r--drivers/target/sbp/sbp_target.c18
-rw-r--r--drivers/target/target_core_alua.c150
-rw-r--r--drivers/target/target_core_alua.h33
-rw-r--r--drivers/target/target_core_configfs.c123
-rw-r--r--drivers/target/target_core_device.c35
-rw-r--r--drivers/target/target_core_fabric_configfs.c38
-rw-r--r--drivers/target/target_core_file.c2
-rw-r--r--drivers/target/target_core_iblock.c43
-rw-r--r--drivers/target/target_core_internal.h4
-rw-r--r--drivers/target/target_core_pr.c24
-rw-r--r--drivers/target/target_core_rd.c1
-rw-r--r--drivers/target/target_core_sbc.c12
-rw-r--r--drivers/target/target_core_spc.c17
-rw-r--r--drivers/target/target_core_stat.c16
-rw-r--r--drivers/target/target_core_tmr.c4
-rw-r--r--drivers/target/target_core_tpg.c41
-rw-r--r--drivers/target/target_core_transport.c244
-rw-r--r--drivers/target/target_core_ua.h2
-rw-r--r--drivers/target/target_core_xcopy.c19
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h1
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c18
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c18
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c3
-rw-r--r--drivers/thermal/thermal_core.c25
-rw-r--r--drivers/tty/amiserial.c3
-rw-r--r--drivers/tty/n_tty.c22
-rw-r--r--drivers/tty/serial/8250/Kconfig2
-rw-r--r--drivers/tty/serial/pmac_zilog.c3
-rw-r--r--drivers/tty/serial/sh-sci.c2
-rw-r--r--drivers/tty/tty_io.c1
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/usb/class/cdc-acm.c2
-rw-r--r--drivers/usb/core/hub.c7
-rw-r--r--drivers/usb/core/usb-acpi.c4
-rw-r--r--drivers/usb/dwc3/ep0.c2
-rw-r--r--drivers/usb/dwc3/gadget.c5
-rw-r--r--drivers/usb/gadget/Kconfig1
-rw-r--r--drivers/usb/gadget/composite.c1
-rw-r--r--drivers/usb/gadget/f_fs.c2
-rw-r--r--drivers/usb/gadget/f_mass_storage.c27
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c1
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c7
-rw-r--r--drivers/usb/gadget/storage_common.h4
-rw-r--r--drivers/usb/gadget/tcm_usb_gadget.c20
-rw-r--r--drivers/usb/gadget/zero.c6
-rw-r--r--drivers/usb/host/ohci-pxa27x.c1
-rw-r--r--drivers/usb/host/xhci-ring.c54
-rw-r--r--drivers/usb/musb/musb_core.c9
-rw-r--r--drivers/usb/musb/musb_cppi41.c164
-rw-r--r--drivers/usb/musb/musb_gadget.c4
-rw-r--r--drivers/usb/phy/phy-am335x.c5
-rw-r--r--drivers/usb/phy/phy-generic.c68
-rw-r--r--drivers/usb/phy/phy-generic.h4
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c2
-rw-r--r--drivers/usb/phy/phy-rcar-gen2-usb.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.c37
-rw-r--r--drivers/usb/serial/generic.c12
-rw-r--r--drivers/usb/serial/mos7840.c32
-rw-r--r--drivers/usb/serial/option.c27
-rw-r--r--drivers/usb/serial/pl2303.c30
-rw-r--r--drivers/usb/serial/spcp8x5.c30
-rw-r--r--drivers/usb/wusbcore/devconnect.c72
-rw-r--r--drivers/usb/wusbcore/security.c98
-rw-r--r--drivers/usb/wusbcore/wusbhc.h6
-rw-r--r--drivers/vhost/scsi.c18
-rw-r--r--drivers/video/atmel_lcdfb.c1
-rw-r--r--drivers/video/kyro/fbdev.c6
-rw-r--r--drivers/video/offb.c29
-rw-r--r--drivers/video/omap2/displays-new/panel-sony-acx565akm.c5
-rw-r--r--drivers/video/sh_mobile_meram.c2
-rw-r--r--drivers/video/vt8500lcdfb.c25
-rw-r--r--drivers/watchdog/bcm2835_wdt.c1
-rw-r--r--drivers/watchdog/ep93xx_wdt.c1
-rw-r--r--drivers/watchdog/ie6xx_wdt.c1
-rw-r--r--drivers/watchdog/jz4740_wdt.c1
-rw-r--r--drivers/watchdog/kempld_wdt.c1
-rw-r--r--drivers/watchdog/max63xx_wdt.c1
-rw-r--r--drivers/watchdog/orion_wdt.c1
-rw-r--r--drivers/watchdog/pnx4008_wdt.c1
-rw-r--r--drivers/watchdog/rt2880_wdt.c1
-rw-r--r--drivers/watchdog/sc1200wdt.c3
-rw-r--r--drivers/watchdog/shwdt.c1
-rw-r--r--drivers/watchdog/softdog.c1
-rw-r--r--drivers/watchdog/stmp3xxx_rtc_wdt.c1
-rw-r--r--drivers/watchdog/txx9wdt.c1
-rw-r--r--drivers/watchdog/ux500_wdt.c1
-rw-r--r--drivers/xen/grant-table.c6
-rw-r--r--drivers/xen/pci.c6
-rw-r--r--drivers/xen/swiotlb-xen.c5
-rw-r--r--fs/9p/vfs_dentry.c19
-rw-r--r--fs/affs/Changes2
-rw-r--r--fs/aio.c138
-rw-r--r--fs/bio.c2
-rw-r--r--fs/btrfs/Kconfig15
-rw-r--r--fs/btrfs/async-thread.c1
-rw-r--r--fs/btrfs/check-integrity.c57
-rw-r--r--fs/btrfs/check-integrity.h2
-rw-r--r--fs/btrfs/ctree.h6
-rw-r--r--fs/btrfs/dev-replace.c2
-rw-r--r--fs/btrfs/disk-io.c21
-rw-r--r--fs/btrfs/extent-tree.c22
-rw-r--r--fs/btrfs/extent_io.c23
-rw-r--r--fs/btrfs/inode.c6
-rw-r--r--fs/btrfs/ioctl.c3
-rw-r--r--fs/btrfs/ordered-data.c3
-rw-r--r--fs/btrfs/relocation.c81
-rw-r--r--fs/btrfs/scrub.c39
-rw-r--r--fs/btrfs/send.c4
-rw-r--r--fs/btrfs/super.c5
-rw-r--r--fs/btrfs/transaction.c4
-rw-r--r--fs/btrfs/tree-log.c5
-rw-r--r--fs/btrfs/volumes.c2
-rw-r--r--fs/ceph/addr.c2
-rw-r--r--fs/ceph/cache.c3
-rw-r--r--fs/ceph/caps.c27
-rw-r--r--fs/ceph/dir.c11
-rw-r--r--fs/ceph/inode.c49
-rw-r--r--fs/ceph/mds_client.c61
-rw-r--r--fs/ceph/mds_client.h1
-rw-r--r--fs/ceph/super.h8
-rw-r--r--fs/cifs/cifsglob.h1
-rw-r--r--fs/cifs/ioctl.c6
-rw-r--r--fs/cifs/smb2ops.c99
-rw-r--r--fs/cifs/smb2pdu.c92
-rw-r--r--fs/cifs/smb2pdu.h12
-rw-r--r--fs/cifs/smb2proto.h1
-rw-r--r--fs/cifs/smbfsctl.h2
-rw-r--r--fs/configfs/dir.c28
-rw-r--r--fs/coredump.c6
-rw-r--r--fs/dcache.c86
-rw-r--r--fs/dlm/netlink.c10
-rw-r--r--fs/ecryptfs/file.c8
-rw-r--r--fs/efivarfs/super.c11
-rw-r--r--fs/eventpoll.c3
-rw-r--r--fs/exec.c5
-rw-r--r--fs/gfs2/glock.c3
-rw-r--r--fs/gfs2/inode.c5
-rw-r--r--fs/gfs2/lock_dlm.c8
-rw-r--r--fs/gfs2/quota.c23
-rw-r--r--fs/gfs2/rgrp.c4
-rw-r--r--fs/hfsplus/wrapper.c17
-rw-r--r--fs/hostfs/hostfs_kern.c11
-rw-r--r--fs/libfs.c12
-rw-r--r--fs/logfs/dev_bdev.c13
-rw-r--r--fs/namei.c11
-rw-r--r--fs/nfs/blocklayout/blocklayout.h1
-rw-r--r--fs/nfs/blocklayout/extents.c2
-rw-r--r--fs/nfs/dns_resolve.c2
-rw-r--r--fs/nfs/inode.c2
-rw-r--r--fs/nfs/internal.h15
-rw-r--r--fs/nfs/nfs4_fs.h8
-rw-r--r--fs/nfs/nfs4proc.c30
-rw-r--r--fs/nfsd/nfs4xdr.c3
-rw-r--r--fs/nfsd/nfscache.c9
-rw-r--r--fs/nfsd/vfs.c173
-rw-r--r--fs/pipe.c39
-rw-r--r--fs/proc/base.c14
-rw-r--r--fs/proc/generic.c18
-rw-r--r--fs/proc/inode.c14
-rw-r--r--fs/proc/namespaces.c8
-rw-r--r--fs/quota/netlink.c16
-rw-r--r--fs/squashfs/Kconfig72
-rw-r--r--fs/squashfs/Makefile5
-rw-r--r--fs/squashfs/block.c36
-rw-r--r--fs/squashfs/cache.c28
-rw-r--r--fs/squashfs/decompressor.c59
-rw-r--r--fs/squashfs/decompressor.h24
-rw-r--r--fs/squashfs/decompressor_multi.c198
-rw-r--r--fs/squashfs/decompressor_multi_percpu.c97
-rw-r--r--fs/squashfs/decompressor_single.c85
-rw-r--r--fs/squashfs/file.c142
-rw-r--r--fs/squashfs/file_cache.c38
-rw-r--r--fs/squashfs/file_direct.c176
-rw-r--r--fs/squashfs/lzo_wrapper.c47
-rw-r--r--fs/squashfs/page_actor.c100
-rw-r--r--fs/squashfs/page_actor.h81
-rw-r--r--fs/squashfs/squashfs.h20
-rw-r--r--fs/squashfs/squashfs_fs_sb.h4
-rw-r--r--fs/squashfs/super.c10
-rw-r--r--fs/squashfs/xz_wrapper.c105
-rw-r--r--fs/squashfs/zlib_wrapper.c64
-rw-r--r--fs/sysfs/file.c22
-rw-r--r--fs/xfs/xfs_bmap.c38
-rw-r--r--fs/xfs/xfs_discard.c5
-rw-r--r--fs/xfs/xfs_fsops.c6
-rw-r--r--fs/xfs/xfs_ioctl.c3
-rw-r--r--fs/xfs/xfs_ioctl32.c3
-rw-r--r--fs/xfs/xfs_mount.c15
-rw-r--r--fs/xfs/xfs_mount.h2
-rw-r--r--fs/xfs/xfs_trans_inode.c8
-rw-r--r--fs/xfs/xfs_trans_resv.c3
-rw-r--r--include/acpi/acconfig.h2
-rw-r--r--include/acpi/acpi_bus.h3
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/asm-generic/simd.h14
-rw-r--r--include/asm-generic/word-at-a-time.h8
-rw-r--r--include/crypto/ablk_helper.h (renamed from arch/x86/include/asm/crypto/ablk_helper.h)0
-rw-r--r--include/crypto/algapi.h18
-rw-r--r--include/crypto/authenc.h12
-rw-r--r--include/crypto/hash_info.h40
-rw-r--r--include/crypto/public_key.h25
-rw-r--r--include/crypto/scatterwalk.h3
-rw-r--r--include/drm/ttm/ttm_bo_api.h4
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h3
-rw-r--r--include/drm/ttm/ttm_object.h61
-rw-r--r--include/keys/big_key-type.h25
-rw-r--r--include/keys/keyring-type.h17
-rw-r--r--include/keys/system_keyring.h23
-rw-r--r--include/linux/acpi.h23
-rw-r--r--include/linux/assoc_array.h92
-rw-r--r--include/linux/assoc_array_priv.h182
-rw-r--r--include/linux/audit.h15
-rw-r--r--include/linux/blkdev.h3
-rw-r--r--include/linux/compiler-intel.h2
-rw-r--r--include/linux/dcache.h2
-rw-r--r--include/linux/device.h12
-rw-r--r--include/linux/dmaengine.h76
-rw-r--r--include/linux/efi.h4
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/genl_magic_func.h53
-rw-r--r--include/linux/gpio/driver.h14
-rw-r--r--include/linux/hid-sensor-hub.h5
-rw-r--r--include/linux/hid-sensor-ids.h12
-rw-r--r--include/linux/hugetlb.h15
-rw-r--r--include/linux/if_macvlan.h17
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/irq.h7
-rw-r--r--include/linux/irqreturn.h2
-rw-r--r--include/linux/kernel.h3
-rw-r--r--include/linux/kexec.h3
-rw-r--r--include/linux/key-type.h6
-rw-r--r--include/linux/key.h52
-rw-r--r--include/linux/mfd/samsung/core.h3
-rw-r--r--include/linux/micrel_phy.h2
-rw-r--r--include/linux/mm.h9
-rw-r--r--include/linux/mm_types.h30
-rw-r--r--include/linux/msi.h10
-rw-r--r--include/linux/net.h10
-rw-r--r--include/linux/netdevice.h2
-rw-r--r--include/linux/nfs4.h10
-rw-r--r--include/linux/nfs_fs.h18
-rw-r--r--include/linux/padata.h3
-rw-r--r--include/linux/pci-acpi.h4
-rw-r--r--include/linux/pci.h86
-rw-r--r--include/linux/pci_hotplug.h5
-rw-r--r--include/linux/pcieport_if.h2
-rw-r--r--include/linux/phy.h1
-rw-r--r--include/linux/platform_data/edma.h8
-rw-r--r--include/linux/preempt_mask.h41
-rw-r--r--include/linux/sched.h4
-rw-r--r--include/linux/security.h26
-rw-r--r--include/linux/seqlock.h29
-rw-r--r--include/linux/shmem_fs.h2
-rw-r--r--include/linux/slab.h111
-rw-r--r--include/linux/slab_def.h4
-rw-r--r--include/linux/slub_def.h2
-rw-r--r--include/linux/tegra-powergate.h27
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/linux/usb/wusb.h2
-rw-r--r--include/linux/user_namespace.h6
-rw-r--r--include/linux/wait.h25
-rw-r--r--include/media/videobuf2-core.h2
-rw-r--r--include/net/genetlink.h131
-rw-r--r--include/net/ip.h2
-rw-r--r--include/net/ipv6.h9
-rw-r--r--include/net/ping.h3
-rw-r--r--include/net/sctp/structs.h7
-rw-r--r--include/net/sock.h6
-rw-r--r--include/scsi/scsi_host.h6
-rw-r--r--include/sound/memalloc.h2
-rw-r--r--include/sound/soc-dapm.h3
-rw-r--r--include/target/target_core_backend.h5
-rw-r--r--include/target/target_core_base.h84
-rw-r--r--include/target/target_core_configfs.h1
-rw-r--r--include/target/target_core_fabric.h2
-rw-r--r--include/trace/events/btrfs.h4
-rw-r--r--include/trace/ftrace.h5
-rw-r--r--include/uapi/drm/radeon_drm.h2
-rw-r--r--include/uapi/linux/audit.h26
-rw-r--r--include/uapi/linux/eventpoll.h13
-rw-r--r--include/uapi/linux/genetlink.h2
-rw-r--r--include/uapi/linux/hash_info.h37
-rw-r--r--include/uapi/linux/if_link.h4
-rw-r--r--include/uapi/linux/input.h3
-rw-r--r--include/uapi/linux/keyctl.h1
-rw-r--r--include/uapi/linux/mic_common.h40
-rw-r--r--include/uapi/linux/netlink_diag.h1
-rw-r--r--include/uapi/linux/packet_diag.h1
-rw-r--r--include/uapi/linux/pci_regs.h72
-rw-r--r--include/uapi/linux/pkt_sched.h7
-rw-r--r--include/uapi/linux/raid/md_p.h1
-rw-r--r--include/uapi/linux/unix_diag.h1
-rw-r--r--include/uapi/sound/compress_offload.h6
-rw-r--r--init/Kconfig27
-rw-r--r--init/main.c2
-rw-r--r--ipc/shm.c37
-rw-r--r--kernel/.gitignore1
-rw-r--r--kernel/Makefile50
-rw-r--r--kernel/audit.c153
-rw-r--r--kernel/audit.h3
-rw-r--r--kernel/auditfilter.c3
-rw-r--r--kernel/auditsc.c133
-rw-r--r--kernel/bounds.c2
-rw-r--r--kernel/cgroup.c42
-rw-r--r--kernel/cpuset.c8
-rw-r--r--kernel/extable.c4
-rw-r--r--kernel/futex.c7
-rw-r--r--kernel/irq/pm.c2
-rw-r--r--kernel/irq/settings.h7
-rw-r--r--kernel/irq/spurious.c12
-rw-r--r--kernel/kexec.c4
-rw-r--r--kernel/modsign_certificate.S12
-rw-r--r--kernel/modsign_pubkey.c104
-rw-r--r--kernel/module-internal.h2
-rw-r--r--kernel/module_signing.c11
-rw-r--r--kernel/padata.c9
-rw-r--r--kernel/power/snapshot.c3
-rw-r--r--kernel/power/user.c1
-rw-r--r--kernel/rcu/tree_plugin.h4
-rw-r--r--kernel/sched/core.c8
-rw-r--r--kernel/sched/fair.c27
-rw-r--r--kernel/system_certificates.S20
-rw-r--r--kernel/system_keyring.c105
-rw-r--r--kernel/taskstats.c38
-rw-r--r--kernel/time/tick-common.c15
-rw-r--r--kernel/time/tick-sched.c25
-rw-r--r--kernel/time/timekeeping.c2
-rw-r--r--kernel/timer.c5
-rw-r--r--kernel/trace/ftrace.c64
-rw-r--r--kernel/trace/trace_events.c3
-rw-r--r--kernel/trace/trace_syscalls.c10
-rw-r--r--kernel/user.c4
-rw-r--r--kernel/user_namespace.c6
-rw-r--r--kernel/workqueue.c82
-rw-r--r--lib/Kconfig14
-rw-r--r--lib/Makefile5
-rw-r--r--lib/assoc_array.c1746
-rw-r--r--lib/lockref.c9
-rw-r--r--lib/mpi/mpiutil.c3
-rw-r--r--lib/percpu_ida.c5
-rw-r--r--lib/random32.c12
-rw-r--r--mm/huge_memory.c12
-rw-r--r--mm/hugetlb.c51
-rw-r--r--mm/memcontrol.c41
-rw-r--r--mm/memory.c7
-rw-r--r--mm/mempolicy.c2
-rw-r--r--mm/migrate.c48
-rw-r--r--mm/shmem.c36
-rw-r--r--mm/slab.c571
-rw-r--r--mm/slub.c45
-rw-r--r--mm/swap.c143
-rw-r--r--net/Kconfig4
-rw-r--r--net/appletalk/ddp.c16
-rw-r--r--net/atm/common.c2
-rw-r--r--net/ax25/af_ax25.c4
-rw-r--r--net/bluetooth/af_bluetooth.c9
-rw-r--r--net/bluetooth/hci_sock.c2
-rw-r--r--net/bluetooth/l2cap_core.c3
-rw-r--r--net/bluetooth/rfcomm/core.c3
-rw-r--r--net/bluetooth/rfcomm/sock.c7
-rw-r--r--net/bluetooth/sco.c1
-rw-r--r--net/bluetooth/smp.c3
-rw-r--r--net/bridge/br_if.c3
-rw-r--r--net/bridge/br_private.h10
-rw-r--r--net/bridge/br_stp_bpdu.c2
-rw-r--r--net/bridge/br_vlan.c24
-rw-r--r--net/bridge/netfilter/ebt_ip6.c8
-rw-r--r--net/caif/caif_socket.c4
-rw-r--r--net/compat.c5
-rw-r--r--net/core/dev.c13
-rw-r--r--net/core/drop_monitor.c16
-rw-r--r--net/core/iovec.c3
-rw-r--r--net/core/pktgen.c7
-rw-r--r--net/core/skbuff.c76
-rw-r--r--net/core/sock.c2
-rw-r--r--net/dccp/ipv6.c1
-rw-r--r--net/hsr/hsr_framereg.c3
-rw-r--r--net/hsr/hsr_netlink.c92
-rw-r--r--net/ieee802154/6lowpan.c4
-rw-r--r--net/ieee802154/dgram.c3
-rw-r--r--net/ieee802154/ieee802154.h21
-rw-r--r--net/ieee802154/netlink.c45
-rw-r--r--net/ieee802154/nl-mac.c79
-rw-r--r--net/ieee802154/nl-phy.c37
-rw-r--r--net/ipv4/datagram.c2
-rw-r--r--net/ipv4/fib_rules.c5
-rw-r--r--net/ipv4/ip_sockglue.c3
-rw-r--r--net/ipv4/ip_tunnel.c4
-rw-r--r--net/ipv4/ip_vti.c1
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c1
-rw-r--r--net/ipv4/ping.c56
-rw-r--r--net/ipv4/protocol.c8
-rw-r--r--net/ipv4/raw.c6
-rw-r--r--net/ipv4/route.c8
-rw-r--r--net/ipv4/tcp.c10
-rw-r--r--net/ipv4/tcp_ipv4.c2
-rw-r--r--net/ipv4/tcp_memcontrol.c9
-rw-r--r--net/ipv4/tcp_metrics.c10
-rw-r--r--net/ipv4/tcp_offload.c31
-rw-r--r--net/ipv4/tcp_output.c7
-rw-r--r--net/ipv4/udp.c61
-rw-r--r--net/ipv6/addrconf.c40
-rw-r--r--net/ipv6/af_inet6.c4
-rw-r--r--net/ipv6/datagram.c9
-rw-r--r--net/ipv6/fib6_rules.c6
-rw-r--r--net/ipv6/ip6_output.c4
-rw-r--r--net/ipv6/ip6_tunnel.c18
-rw-r--r--net/ipv6/ndisc.c5
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c1
-rw-r--r--net/ipv6/ping.c3
-rw-r--r--net/ipv6/protocol.c4
-rw-r--r--net/ipv6/raw.c9
-rw-r--r--net/ipv6/route.c30
-rw-r--r--net/ipv6/sit.c68
-rw-r--r--net/ipv6/tcp_ipv6.c1
-rw-r--r--net/ipv6/tcpv6_offload.c32
-rw-r--r--net/ipv6/udp.c10
-rw-r--r--net/ipx/af_ipx.c3
-rw-r--r--net/irda/af_irda.c4
-rw-r--r--net/irda/irnetlink.c5
-rw-r--r--net/iucv/af_iucv.c2
-rw-r--r--net/key/af_key.c1
-rw-r--r--net/l2tp/l2tp_ip.c4
-rw-r--r--net/l2tp/l2tp_ip6.c3
-rw-r--r--net/l2tp/l2tp_netlink.c9
-rw-r--r--net/l2tp/l2tp_ppp.c2
-rw-r--r--net/llc/af_llc.c2
-rw-r--r--net/mac80211/cfg.c15
-rw-r--r--net/mac80211/ibss.c4
-rw-r--r--net/mac80211/ieee80211_i.h1
-rw-r--r--net/mac80211/iface.c1
-rw-r--r--net/mac80211/main.c3
-rw-r--r--net/mac80211/mesh.c20
-rw-r--r--net/mac80211/mlme.c2
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c7
-rw-r--r--net/mac80211/rx.c3
-rw-r--r--net/mac80211/scan.c2
-rw-r--r--net/mac80211/spectmgmt.c2
-rw-r--r--net/mac80211/util.c11
-rw-r--r--net/netfilter/Kconfig2
-rw-r--r--net/netfilter/ipset/ip_set_hash_netnet.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c4
-rw-r--r--net/netfilter/nf_conntrack_core.c3
-rw-r--r--net/netfilter/nf_conntrack_seqadj.c4
-rw-r--r--net/netfilter/nf_synproxy_core.c7
-rw-r--r--net/netfilter/nf_tables_api.c46
-rw-r--r--net/netfilter/nft_compat.c19
-rw-r--r--net/netfilter/xt_hashlimit.c25
-rw-r--r--net/netlabel/netlabel_cipso_v4.c4
-rw-r--r--net/netlabel/netlabel_mgmt.c4
-rw-r--r--net/netlabel/netlabel_unlabeled.c4
-rw-r--r--net/netlink/af_netlink.c4
-rw-r--r--net/netlink/genetlink.c531
-rw-r--r--net/netrom/af_netrom.c3
-rw-r--r--net/nfc/llcp_sock.c2
-rw-r--r--net/nfc/netlink.c41
-rw-r--r--net/nfc/rawsock.c2
-rw-r--r--net/openvswitch/datapath.c59
-rw-r--r--net/openvswitch/datapath.h1
-rw-r--r--net/openvswitch/dp_notify.c11
-rw-r--r--net/packet/af_packet.c108
-rw-r--r--net/packet/internal.h1
-rw-r--r--net/phonet/datagram.c9
-rw-r--r--net/rds/ib_send.c5
-rw-r--r--net/rds/recv.c2
-rw-r--r--net/rose/af_rose.c8
-rw-r--r--net/rxrpc/ar-recvmsg.c9
-rw-r--r--net/sched/act_api.c26
-rw-r--r--net/sched/act_csum.c2
-rw-r--r--net/sched/act_gact.c2
-rw-r--r--net/sched/act_ipt.c4
-rw-r--r--net/sched/act_mirred.c2
-rw-r--r--net/sched/act_nat.c2
-rw-r--r--net/sched/act_pedit.c2
-rw-r--r--net/sched/act_police.c1
-rw-r--r--net/sched/act_simple.c1
-rw-r--r--net/sched/act_skbedit.c1
-rw-r--r--net/sched/sch_fq.c40
-rw-r--r--net/sched/sch_htb.c20
-rw-r--r--net/sched/sch_netem.c7
-rw-r--r--net/sched/sch_tbf.c139
-rw-r--r--net/sctp/associola.c11
-rw-r--r--net/sctp/output.c6
-rw-r--r--net/sctp/outqueue.c6
-rw-r--r--net/sctp/sm_statefuns.c12
-rw-r--r--net/sctp/socket.c36
-rw-r--r--net/sctp/sysctl.c76
-rw-r--r--net/sctp/transport.c2
-rw-r--r--net/socket.c24
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c2
-rw-r--r--net/sunrpc/rpc_pipe.c11
-rw-r--r--net/tipc/core.c7
-rw-r--r--net/tipc/handler.c11
-rw-r--r--net/tipc/link.c3
-rw-r--r--net/tipc/netlink.c11
-rw-r--r--net/tipc/socket.c6
-rw-r--r--net/unix/af_unix.c13
-rw-r--r--net/vmw_vsock/af_vsock.c2
-rw-r--r--net/vmw_vsock/vmci_transport.c2
-rw-r--r--net/wimax/op-msg.c27
-rw-r--r--net/wimax/op-reset.c17
-rw-r--r--net/wimax/op-rfkill.c21
-rw-r--r--net/wimax/op-state-get.c17
-rw-r--r--net/wimax/stack.c96
-rw-r--r--net/wimax/wimax-internal.h8
-rw-r--r--net/wireless/core.c9
-rw-r--r--net/wireless/ibss.c18
-rw-r--r--net/wireless/nl80211.c266
-rw-r--r--net/x25/af_x25.c3
-rw-r--r--scripts/asn1_compiler.c2
-rwxr-xr-xscripts/checkpatch.pl1
-rwxr-xr-xscripts/recordmcount.pl3
-rw-r--r--scripts/sortextable.c5
-rw-r--r--security/Makefile1
-rw-r--r--security/apparmor/audit.c14
-rw-r--r--security/apparmor/capability.c15
-rw-r--r--security/apparmor/domain.c16
-rw-r--r--security/apparmor/include/audit.h1
-rw-r--r--security/apparmor/include/capability.h5
-rw-r--r--security/apparmor/include/ipc.h4
-rw-r--r--security/apparmor/ipc.c9
-rw-r--r--security/apparmor/lsm.c2
-rw-r--r--security/capability.c15
-rw-r--r--security/integrity/digsig.c7
-rw-r--r--security/integrity/digsig_asymmetric.c11
-rw-r--r--security/integrity/evm/evm_main.c4
-rw-r--r--security/integrity/evm/evm_posix_acl.c3
-rw-r--r--security/integrity/iint.c2
-rw-r--r--security/integrity/ima/Kconfig64
-rw-r--r--security/integrity/ima/Makefile2
-rw-r--r--security/integrity/ima/ima.h106
-rw-r--r--security/integrity/ima/ima_api.c154
-rw-r--r--security/integrity/ima/ima_appraise.c106
-rw-r--r--security/integrity/ima/ima_crypto.c141
-rw-r--r--security/integrity/ima/ima_fs.c75
-rw-r--r--security/integrity/ima/ima_init.c40
-rw-r--r--security/integrity/ima/ima_main.c63
-rw-r--r--security/integrity/ima/ima_policy.c1
-rw-r--r--security/integrity/ima/ima_queue.c10
-rw-r--r--security/integrity/ima/ima_template.c187
-rw-r--r--security/integrity/ima/ima_template_lib.c351
-rw-r--r--security/integrity/ima/ima_template_lib.h49
-rw-r--r--security/integrity/integrity.h40
-rw-r--r--security/keys/Kconfig29
-rw-r--r--security/keys/Makefile2
-rw-r--r--security/keys/big_key.c207
-rw-r--r--security/keys/compat.c3
-rw-r--r--security/keys/gc.c47
-rw-r--r--security/keys/internal.h74
-rw-r--r--security/keys/key.c110
-rw-r--r--security/keys/keyctl.c3
-rw-r--r--security/keys/keyring.c1535
-rw-r--r--security/keys/persistent.c167
-rw-r--r--security/keys/proc.c17
-rw-r--r--security/keys/process_keys.c141
-rw-r--r--security/keys/request_key.c60
-rw-r--r--security/keys/request_key_auth.c31
-rw-r--r--security/keys/sysctl.c11
-rw-r--r--security/keys/user_defined.c18
-rw-r--r--security/lsm_audit.c3
-rw-r--r--security/security.c13
-rw-r--r--security/selinux/hooks.c241
-rw-r--r--security/selinux/include/objsec.h4
-rw-r--r--security/selinux/include/security.h13
-rw-r--r--security/selinux/include/xfrm.h49
-rw-r--r--security/selinux/netlabel.c6
-rw-r--r--security/selinux/netnode.c2
-rw-r--r--security/selinux/nlmsgtab.c2
-rw-r--r--security/selinux/selinuxfs.c4
-rw-r--r--security/selinux/ss/ebitmap.c20
-rw-r--r--security/selinux/ss/ebitmap.h10
-rw-r--r--security/selinux/ss/mls.c22
-rw-r--r--security/selinux/ss/mls_types.h2
-rw-r--r--security/selinux/ss/policydb.c3
-rw-r--r--security/selinux/ss/services.c24
-rw-r--r--security/selinux/xfrm.c481
-rw-r--r--security/smack/smack.h12
-rw-r--r--security/smack/smack_access.c10
-rw-r--r--security/smack/smack_lsm.c11
-rw-r--r--security/smack/smackfs.c10
-rw-r--r--sound/atmel/abdac.c3
-rw-r--r--sound/firewire/amdtp.c15
-rw-r--r--sound/firewire/amdtp.h1
-rw-r--r--sound/firewire/dice.c4
-rw-r--r--sound/pci/hda/Kconfig3
-rw-r--r--sound/pci/hda/hda_codec.c4
-rw-r--r--sound/pci/hda/hda_codec.h1
-rw-r--r--sound/pci/hda/hda_generic.c126
-rw-r--r--sound/pci/hda/hda_generic.h3
-rw-r--r--sound/pci/hda/hda_intel.c12
-rw-r--r--sound/pci/hda/patch_analog.c16
-rw-r--r--sound/pci/hda/patch_conexant.c24
-rw-r--r--sound/pci/hda/patch_hdmi.c32
-rw-r--r--sound/pci/hda/patch_realtek.c211
-rw-r--r--sound/pci/hda/patch_sigmatel.c3
-rw-r--r--sound/soc/atmel/sam9x5_wm8731.c2
-rw-r--r--sound/soc/codecs/ab8500-codec.c66
-rw-r--r--sound/soc/codecs/arizona.c4
-rw-r--r--sound/soc/codecs/wm5110.c68
-rw-r--r--sound/soc/codecs/wm8731.c4
-rw-r--r--sound/soc/codecs/wm8990.c2
-rw-r--r--sound/soc/davinci/davinci-pcm.c2
-rw-r--r--sound/soc/fsl/pcm030-audio-fabric.c3
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c22
-rw-r--r--sound/soc/omap/n810.c4
-rw-r--r--sound/soc/sh/Kconfig1
-rw-r--r--sound/soc/sh/rcar/core.c13
-rw-r--r--sound/soc/sh/rcar/scu.c2
-rw-r--r--sound/soc/soc-core.c4
-rw-r--r--sound/soc/soc-devres.c4
-rw-r--r--sound/soc/soc-pcm.c18
-rw-r--r--sound/usb/endpoint.c16
-rw-r--r--sound/usb/mixer_quirks.c2
-rw-r--r--tools/power/cpupower/man/cpupower-idle-info.13
-rw-r--r--tools/power/cpupower/man/cpupower-idle-set.171
-rw-r--r--tools/power/cpupower/utils/helpers/sysfs.c4
-rw-r--r--tools/power/x86/turbostat/turbostat.c197
-rw-r--r--tools/usb/Makefile5
-rw-r--r--virt/kvm/kvm_main.c8
1895 files changed, 33264 insertions, 17616 deletions
diff --git a/CREDITS b/CREDITS
index 4fc997d58ab2..4c7738f49357 100644
--- a/CREDITS
+++ b/CREDITS
@@ -655,6 +655,11 @@ S: Stanford University
655S: Stanford, California 94305 655S: Stanford, California 94305
656S: USA 656S: USA
657 657
658N: Carlos Chinea
659E: carlos.chinea@nokia.com
660E: cch.devel@gmail.com
661D: Author of HSI Subsystem
662
658N: Randolph Chung 663N: Randolph Chung
659E: tausq@debian.org 664E: tausq@debian.org
660D: Linux/PA-RISC hacker 665D: Linux/PA-RISC hacker
diff --git a/Documentation/Changes b/Documentation/Changes
index b17580885273..07c75d18154e 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -196,13 +196,6 @@ chmod 0644 /dev/cpu/microcode
196as root before you can use this. You'll probably also want to 196as root before you can use this. You'll probably also want to
197get the user-space microcode_ctl utility to use with this. 197get the user-space microcode_ctl utility to use with this.
198 198
199Powertweak
200----------
201
202If you are running v0.1.17 or earlier, you should upgrade to
203version v0.99.0 or higher. Running old versions may cause problems
204with programs using shared memory.
205
206udev 199udev
207---- 200----
208udev is a userspace application for populating /dev dynamically with 201udev is a userspace application for populating /dev dynamically with
@@ -366,10 +359,6 @@ Intel P6 microcode
366------------------ 359------------------
367o <http://www.urbanmyth.org/microcode/> 360o <http://www.urbanmyth.org/microcode/>
368 361
369Powertweak
370----------
371o <http://powertweak.sourceforge.net/>
372
373udev 362udev
374---- 363----
375o <http://www.kernel.org/pub/linux/utils/kernel/hotplug/udev.html> 364o <http://www.kernel.org/pub/linux/utils/kernel/hotplug/udev.html>
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index 6c9d9d37c83a..f5170082bdb3 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -58,7 +58,7 @@
58 </sect1> 58 </sect1>
59 <sect1><title>Wait queues and Wake events</title> 59 <sect1><title>Wait queues and Wake events</title>
60!Iinclude/linux/wait.h 60!Iinclude/linux/wait.h
61!Ekernel/wait.c 61!Ekernel/sched/wait.c
62 </sect1> 62 </sect1>
63 <sect1><title>High-resolution timers</title> 63 <sect1><title>High-resolution timers</title>
64!Iinclude/linux/ktime.h 64!Iinclude/linux/ktime.h
diff --git a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
index e287c8fc803b..4165e7bfa4ff 100644
--- a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
@@ -73,7 +73,8 @@ range from zero to the maximal number of valid planes for the currently active
73format. For the single-planar API, applications must set <structfield> plane 73format. For the single-planar API, applications must set <structfield> plane
74</structfield> to zero. Additional flags may be posted in the <structfield> 74</structfield> to zero. Additional flags may be posted in the <structfield>
75flags </structfield> field. Refer to a manual for open() for details. 75flags </structfield> field. Refer to a manual for open() for details.
76Currently only O_CLOEXEC is supported. All other fields must be set to zero. 76Currently only O_CLOEXEC, O_RDONLY, O_WRONLY, and O_RDWR are supported. All
77other fields must be set to zero.
77In the case of multi-planar API, every plane is exported separately using 78In the case of multi-planar API, every plane is exported separately using
78multiple <constant> VIDIOC_EXPBUF </constant> calls. </para> 79multiple <constant> VIDIOC_EXPBUF </constant> calls. </para>
79 80
@@ -170,8 +171,9 @@ multi-planar API. Otherwise this value must be set to zero. </entry>
170 <entry>__u32</entry> 171 <entry>__u32</entry>
171 <entry><structfield>flags</structfield></entry> 172 <entry><structfield>flags</structfield></entry>
172 <entry>Flags for the newly created file, currently only <constant> 173 <entry>Flags for the newly created file, currently only <constant>
173O_CLOEXEC </constant> is supported, refer to the manual of open() for more 174O_CLOEXEC </constant>, <constant>O_RDONLY</constant>, <constant>O_WRONLY
174details.</entry> 175</constant>, and <constant>O_RDWR</constant> are supported, refer to the manual
176of open() for more details.</entry>
175 </row> 177 </row>
176 <row> 178 <row>
177 <entry>__s32</entry> 179 <entry>__s32</entry>
diff --git a/Documentation/assoc_array.txt b/Documentation/assoc_array.txt
new file mode 100644
index 000000000000..2f2c6cdd73c0
--- /dev/null
+++ b/Documentation/assoc_array.txt
@@ -0,0 +1,574 @@
1 ========================================
2 GENERIC ASSOCIATIVE ARRAY IMPLEMENTATION
3 ========================================
4
5Contents:
6
7 - Overview.
8
9 - The public API.
10 - Edit script.
11 - Operations table.
12 - Manipulation functions.
13 - Access functions.
14 - Index key form.
15
16 - Internal workings.
17 - Basic internal tree layout.
18 - Shortcuts.
19 - Splitting and collapsing nodes.
20 - Non-recursive iteration.
21 - Simultaneous alteration and iteration.
22
23
24========
25OVERVIEW
26========
27
28This associative array implementation is an object container with the following
29properties:
30
31 (1) Objects are opaque pointers. The implementation does not care where they
32 point (if anywhere) or what they point to (if anything).
33
34 [!] NOTE: Pointers to objects _must_ be zero in the least significant bit.
35
36 (2) Objects do not need to contain linkage blocks for use by the array. This
37 permits an object to be located in multiple arrays simultaneously.
38 Rather, the array is made up of metadata blocks that point to objects.
39
40 (3) Objects require index keys to locate them within the array.
41
42 (4) Index keys must be unique. Inserting an object with the same key as one
43 already in the array will replace the old object.
44
45 (5) Index keys can be of any length and can be of different lengths.
46
47 (6) Index keys should encode the length early on, before any variation due to
48 length is seen.
49
50 (7) Index keys can include a hash to scatter objects throughout the array.
51
 52 (8) The array can be iterated over. The objects will not necessarily come out in
53 key order.
54
55 (9) The array can be iterated over whilst it is being modified, provided the
56 RCU readlock is being held by the iterator. Note, however, under these
57 circumstances, some objects may be seen more than once. If this is a
58 problem, the iterator should lock against modification. Objects will not
59 be missed, however, unless deleted.
60
61(10) Objects in the array can be looked up by means of their index key.
62
63(11) Objects can be looked up whilst the array is being modified, provided the
64 RCU readlock is being held by the thread doing the look up.
65
66The implementation uses a tree of 16-pointer nodes internally that are indexed
67on each level by nibbles from the index key in the same manner as in a radix
68tree. To improve memory efficiency, shortcuts can be emplaced to skip over
69what would otherwise be a series of single-occupancy nodes. Further, nodes
70pack leaf object pointers into spare space in the node rather than making an
 71extra branch until such time as an object needs to be added to a full node.
72
73
74==============
75THE PUBLIC API
76==============
77
78The public API can be found in <linux/assoc_array.h>. The associative array is
79rooted on the following structure:
80
81 struct assoc_array {
82 ...
83 };
84
85The code is selected by enabling CONFIG_ASSOCIATIVE_ARRAY.
86
87
88EDIT SCRIPT
89-----------
90
91The insertion and deletion functions produce an 'edit script' that can later be
92applied to effect the changes without risking ENOMEM. This retains the
93preallocated metadata blocks that will be installed in the internal tree and
94keeps track of the metadata blocks that will be removed from the tree when the
95script is applied.
96
97This is also used to keep track of dead blocks and dead objects after the
98script has been applied so that they can be freed later. The freeing is done
99after an RCU grace period has passed - thus allowing access functions to
100proceed under the RCU read lock.
101
 102The script appears outside of the API as a pointer of the type:
103
104 struct assoc_array_edit;
105
106There are two functions for dealing with the script:
107
108 (1) Apply an edit script.
109
110 void assoc_array_apply_edit(struct assoc_array_edit *edit);
111
112 This will perform the edit functions, interpolating various write barriers
113 to permit accesses under the RCU read lock to continue. The edit script
114 will then be passed to call_rcu() to free it and any dead stuff it points
115 to.
116
117 (2) Cancel an edit script.
118
119 void assoc_array_cancel_edit(struct assoc_array_edit *edit);
120
121 This frees the edit script and all preallocated memory immediately. If
122 this was for insertion, the new object is _not_ released by this function,
123 but must rather be released by the caller.
124
125These functions are guaranteed not to fail.
126
127
128OPERATIONS TABLE
129----------------
130
131Various functions take a table of operations:
132
133 struct assoc_array_ops {
134 ...
135 };
136
137This points to a number of methods, all of which need to be provided:
138
139 (1) Get a chunk of index key from caller data:
140
141 unsigned long (*get_key_chunk)(const void *index_key, int level);
142
143 This should return a chunk of caller-supplied index key starting at the
144 *bit* position given by the level argument. The level argument will be a
145 multiple of ASSOC_ARRAY_KEY_CHUNK_SIZE and the function should return
146 ASSOC_ARRAY_KEY_CHUNK_SIZE bits. No error is possible.
147
148
149 (2) Get a chunk of an object's index key.
150
151 unsigned long (*get_object_key_chunk)(const void *object, int level);
152
153 As the previous function, but gets its data from an object in the array
154 rather than from a caller-supplied index key.
155
156
157 (3) See if this is the object we're looking for.
158
159 bool (*compare_object)(const void *object, const void *index_key);
160
161 Compare the object against an index key and return true if it matches and
162 false if it doesn't.
163
164
165 (4) Diff the index keys of two objects.
166
167 int (*diff_objects)(const void *object, const void *index_key);
168
169 Return the bit position at which the index key of the specified object
170 differs from the given index key or -1 if they are the same.
171
172
173 (5) Free an object.
174
175 void (*free_object)(void *object);
176
177 Free the specified object. Note that this may be called an RCU grace
178 period after assoc_array_apply_edit() was called, so synchronize_rcu() may
179 be necessary on module unloading.
180
181
182MANIPULATION FUNCTIONS
183----------------------
184
185There are a number of functions for manipulating an associative array:
186
187 (1) Initialise an associative array.
188
189 void assoc_array_init(struct assoc_array *array);
190
191 This initialises the base structure for an associative array. It can't
192 fail.
193
194
195 (2) Insert/replace an object in an associative array.
196
197 struct assoc_array_edit *
198 assoc_array_insert(struct assoc_array *array,
199 const struct assoc_array_ops *ops,
200 const void *index_key,
201 void *object);
202
203 This inserts the given object into the array. Note that the least
204 significant bit of the pointer must be zero as it's used to type-mark
205 pointers internally.
206
207 If an object already exists for that key then it will be replaced with the
208 new object and the old one will be freed automatically.
209
210 The index_key argument should hold index key information and is
211 passed to the methods in the ops table when they are called.
212
213 This function makes no alteration to the array itself, but rather returns
214 an edit script that must be applied. -ENOMEM is returned in the case of
215 an out-of-memory error.
216
217 The caller should lock exclusively against other modifiers of the array.
218
219
220 (3) Delete an object from an associative array.
221
222 struct assoc_array_edit *
223 assoc_array_delete(struct assoc_array *array,
224 const struct assoc_array_ops *ops,
225 const void *index_key);
226
227 This deletes an object that matches the specified data from the array.
228
229 The index_key argument should hold index key information and is
230 passed to the methods in the ops table when they are called.
231
232 This function makes no alteration to the array itself, but rather returns
233 an edit script that must be applied. -ENOMEM is returned in the case of
234 an out-of-memory error. NULL will be returned if the specified object is
235 not found within the array.
236
237 The caller should lock exclusively against other modifiers of the array.
238
239
240 (4) Delete all objects from an associative array.
241
242 struct assoc_array_edit *
243 assoc_array_clear(struct assoc_array *array,
244 const struct assoc_array_ops *ops);
245
246 This deletes all the objects from an associative array and leaves it
247 completely empty.
248
249 This function makes no alteration to the array itself, but rather returns
250 an edit script that must be applied. -ENOMEM is returned in the case of
251 an out-of-memory error.
252
253 The caller should lock exclusively against other modifiers of the array.
254
255
256 (5) Destroy an associative array, deleting all objects.
257
258 void assoc_array_destroy(struct assoc_array *array,
259 const struct assoc_array_ops *ops);
260
261 This destroys the contents of the associative array and leaves it
262 completely empty. It is not permitted for another thread to be traversing
263 the array under the RCU read lock at the same time as this function is
264 destroying it as no RCU deferral is performed on memory release -
265 something that would require memory to be allocated.
266
267 The caller should lock exclusively against other modifiers and accessors
268 of the array.
269
270
271 (6) Garbage collect an associative array.
272
273 int assoc_array_gc(struct assoc_array *array,
274 const struct assoc_array_ops *ops,
275 bool (*iterator)(void *object, void *iterator_data),
276 void *iterator_data);
277
278 This iterates over the objects in an associative array and passes each one
279 to iterator(). If iterator() returns true, the object is kept. If it
280 returns false, the object will be freed. If the iterator() function
281 returns true, it must perform any appropriate refcount incrementing on the
282 object before returning.
283
284 The internal tree will be packed down if possible as part of the iteration
285 to reduce the number of nodes in it.
286
287 The iterator_data is passed directly to iterator() and is otherwise
288 ignored by the function.
289
290 The function will return 0 if successful and -ENOMEM if there wasn't
291 enough memory.
292
293 It is possible for other threads to iterate over or search the array under
294 the RCU read lock whilst this function is in progress. The caller should
295 lock exclusively against other modifiers of the array.
296
297
298ACCESS FUNCTIONS
299----------------
300
301There are two functions for accessing an associative array:
302
303 (1) Iterate over all the objects in an associative array.
304
305 int assoc_array_iterate(const struct assoc_array *array,
306 int (*iterator)(const void *object,
307 void *iterator_data),
308 void *iterator_data);
309
310 This passes each object in the array to the iterator callback function.
311 iterator_data is private data for that function.
312
313 This may be used on an array at the same time as the array is being
314 modified, provided the RCU read lock is held. Under such circumstances,
315 it is possible for the iteration function to see some objects twice. If
316 this is a problem, then modification should be locked against. The
317 iteration algorithm should not, however, miss any objects.
318
319 The function will return 0 if no objects were in the array or else it will
320 return the result of the last iterator function called. Iteration stops
321 immediately if any call to the iteration function results in a non-zero
322 return.
323
324
325 (2) Find an object in an associative array.
326
327 void *assoc_array_find(const struct assoc_array *array,
328 const struct assoc_array_ops *ops,
329 const void *index_key);
330
331 This walks through the array's internal tree directly to the object
332 specified by the index key.
333
334 This may be used on an array at the same time as the array is being
335 modified, provided the RCU read lock is held.
336
337 The function will return the object if found or will return NULL if the
338 object was not found.
339
340
341INDEX KEY FORM
342--------------
343
344The index key can be of any form, but since the algorithms aren't told how long
345the key is, it is strongly recommended that the index key includes its length
346very early on before any variation due to the length would have an effect on
347comparisons.
348
349This will cause leaves with different length keys to scatter away from each
350other - and those with the same length keys to cluster together.
351
352It is also recommended that the index key begin with a hash of the rest of the
353key to maximise scattering throughout keyspace.
354
355The better the scattering, the wider and lower the internal tree will be.
356
357Poor scattering isn't too much of a problem as there are shortcuts and nodes
358can contain mixtures of leaves and metadata pointers.
359
360The index key is read in chunks of machine word. Each chunk is subdivided into
361one nibble (4 bits) per level, so on a 32-bit CPU this is good for 8 levels and
362on a 64-bit CPU, 16 levels. Unless the scattering is really poor, it is
363unlikely that more than one word of any particular index key will have to be
364used.
365
366
367=================
368INTERNAL WORKINGS
369=================
370
371The associative array data structure has an internal tree. This tree is
372constructed of two types of metadata blocks: nodes and shortcuts.
373
374A node is an array of slots. Each slot can contain one of four things:
375
376 (*) A NULL pointer, indicating that the slot is empty.
377
378 (*) A pointer to an object (a leaf).
379
380 (*) A pointer to a node at the next level.
381
382 (*) A pointer to a shortcut.
383
384
385BASIC INTERNAL TREE LAYOUT
386--------------------------
387
388Ignoring shortcuts for the moment, the nodes form a multilevel tree. The index
389key space is strictly subdivided by the nodes in the tree and nodes occur on
390fixed levels. For example:
391
392 Level: 0 1 2 3
393 =============== =============== =============== ===============
394 NODE D
395 NODE B NODE C +------>+---+
396 +------>+---+ +------>+---+ | | 0 |
397 NODE A | | 0 | | | 0 | | +---+
398 +---+ | +---+ | +---+ | : :
399 | 0 | | : : | : : | +---+
400 +---+ | +---+ | +---+ | | f |
401 | 1 |---+ | 3 |---+ | 7 |---+ +---+
402 +---+ +---+ +---+
403 : : : : | 8 |---+
404 +---+ +---+ +---+ | NODE E
405 | e |---+ | f | : : +------>+---+
406 +---+ | +---+ +---+ | 0 |
407 | f | | | f | +---+
408 +---+ | +---+ : :
409 | NODE F +---+
410 +------>+---+ | f |
411 | 0 | NODE G +---+
412 +---+ +------>+---+
413 : : | | 0 |
414 +---+ | +---+
415 | 6 |---+ : :
416 +---+ +---+
417 : : | f |
418 +---+ +---+
419 | f |
420 +---+
421
422In the above example, there are 7 nodes (A-G), each with 16 slots (0-f).
423 Assuming no other metadata nodes in the tree, the key space is divided thusly:
424
425 KEY PREFIX NODE
426 ========== ====
427 137* D
428 138* E
429 13[0-69-f]* C
430 1[0-24-f]* B
431 e6* G
432 e[0-57-f]* F
433 [02-df]* A
434
435So, for instance, keys with the following example index keys will be found in
436the appropriate nodes:
437
438 INDEX KEY PREFIX NODE
439 =============== ======= ====
440 13694892892489 13 C
441 13795289025897 137 D
442 13889dde88793 138 E
443 138bbb89003093 138 E
444 1394879524789 13 C
445 1458952489 1 B
446 9431809de993ba - A
447 b4542910809cd - A
448 e5284310def98 e F
449 e68428974237 e6 G
450 e7fffcbd443 e F
451 f3842239082 - A
452
453To save memory, if a node can hold all the leaves in its portion of keyspace,
454then the node will have all those leaves in it and will not have any metadata
455pointers - even if some of those leaves would like to be in the same slot.
456
457A node can contain a heterogeneous mix of leaves and metadata pointers.
458Metadata pointers must be in the slots that match their subdivisions of key
459space. The leaves can be in any slot not occupied by a metadata pointer. It
460is guaranteed that none of the leaves in a node will match a slot occupied by a
461metadata pointer. If the metadata pointer is there, any leaf whose key matches
462the metadata key prefix must be in the subtree that the metadata pointer points
463to.
464
465In the above example list of index keys, node A will contain:
466
467 SLOT CONTENT INDEX KEY (PREFIX)
468 ==== =============== ==================
469 1 PTR TO NODE B 1*
470 any LEAF 9431809de993ba
471 any LEAF b4542910809cd
472 e PTR TO NODE F e*
473 any LEAF f3842239082
474
475and node B:
476
477 3 PTR TO NODE C 13*
478 any LEAF 1458952489
479
480
481SHORTCUTS
482---------
483
484Shortcuts are metadata records that jump over a piece of keyspace. A shortcut
485is a replacement for a series of single-occupancy nodes ascending through the
486levels. Shortcuts exist to save memory and to speed up traversal.
487
488It is possible for the root of the tree to be a shortcut - say, for example,
489the tree contains at least 17 nodes all with key prefix '1111'. The insertion
490algorithm will insert a shortcut to skip over the '1111' keyspace in a single
491bound and get to the fourth level where these actually become different.
492
493
494SPLITTING AND COLLAPSING NODES
495------------------------------
496
497Each node has a maximum capacity of 16 leaves and metadata pointers. If the
498insertion algorithm finds that it is trying to insert a 17th object into a
499node, that node will be split such that at least two leaves that have a common
500key segment at that level end up in a separate node rooted on that slot for
501that common key segment.
502
503If the leaves in a full node and the leaf that is being inserted are
504sufficiently similar, then a shortcut will be inserted into the tree.
505
506When the number of objects in the subtree rooted at a node falls to 16 or
507fewer, then the subtree will be collapsed down to a single node - and this will
508ripple towards the root if possible.
509
510
511NON-RECURSIVE ITERATION
512-----------------------
513
514 Each node and shortcut contains a back pointer to its parent and the number
515 of the slot in that parent that points to it. Non-recursive iteration uses these to
516proceed rootwards through the tree, going to the parent node, slot N + 1 to
517make sure progress is made without the need for a stack.
518
519The backpointers, however, make simultaneous alteration and iteration tricky.
520
521
522SIMULTANEOUS ALTERATION AND ITERATION
523-------------------------------------
524
525There are a number of cases to consider:
526
527 (1) Simple insert/replace. This involves simply replacing a NULL or old
528 matching leaf pointer with the pointer to the new leaf after a barrier.
529 The metadata blocks don't change otherwise. An old leaf won't be freed
530 until after the RCU grace period.
531
532 (2) Simple delete. This involves just clearing an old matching leaf. The
533 metadata blocks don't change otherwise. The old leaf won't be freed until
534 after the RCU grace period.
535
536 (3) Insertion replacing part of a subtree that we haven't yet entered. This
537 may involve replacement of part of that subtree - but that won't affect
538 the iteration as we won't have reached the pointer to it yet and the
539 ancestry blocks are not replaced (the layout of those does not change).
540
541 (4) Insertion replacing nodes that we're actively processing. This isn't a
542 problem as we've passed the anchoring pointer and won't switch onto the
543 new layout until we follow the back pointers - at which point we've
544 already examined the leaves in the replaced node (we iterate over all the
545 leaves in a node before following any of its metadata pointers).
546
547 We might, however, re-see some leaves that have been split out into a new
548 branch that's in a slot further along than we were at.
549
550 (5) Insertion replacing nodes that we're processing a dependent branch of.
551 This won't affect us until we follow the back pointers. Similar to (4).
552
553 (6) Deletion collapsing a branch under us. This doesn't affect us because the
554 back pointers will get us back to the parent of the new node before we
555 could see the new node. The entire collapsed subtree is thrown away
556 unchanged - and will still be rooted on the same slot, so we shouldn't
557 process it a second time as we'll go back to slot + 1.
558
559Note:
560
561 (*) Under some circumstances, we need to simultaneously change the parent
562 pointer and the parent slot pointer on a node (say, for example, we
563 inserted another node before it and moved it up a level). We cannot do
564 this without locking against a read - so we have to replace that node too.
565
566 However, when we're changing a shortcut into a node this isn't a problem
567 as shortcuts only have one slot and so the parent slot number isn't used
568 when traversing backwards over one. This means that it's okay to change
569 the slot number first - provided suitable barriers are used to make sure
570 the parent slot number is read after the back pointer.
571
572Obsolete blocks and leaves are freed up after an RCU grace period has passed,
573so as long as anyone doing walking or iteration holds the RCU read lock, the
574old superstructure should not go away on them.
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
index 274752f8bdf9..719320b5ed3f 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -266,10 +266,12 @@ E.g.
266Invalidation is removing an entry from the cache without writing it 266Invalidation is removing an entry from the cache without writing it
267back. Cache blocks can be invalidated via the invalidate_cblocks 267back. Cache blocks can be invalidated via the invalidate_cblocks
268message, which takes an arbitrary number of cblock ranges. Each cblock 268message, which takes an arbitrary number of cblock ranges. Each cblock
269must be expressed as a decimal value, in the future a variant message 269range's end value is "one past the end", meaning 5-10 expresses a range
270that takes cblock ranges expressed in hexidecimal may be needed to 270of values from 5 to 9. Each cblock must be expressed as a decimal
271better support efficient invalidation of larger caches. The cache must 271value, in the future a variant message that takes cblock ranges
272be in passthrough mode when invalidate_cblocks is used. 272expressed in hexidecimal may be needed to better support efficient
273invalidation of larger caches. The cache must be in passthrough mode
274when invalidate_cblocks is used.
273 275
274 invalidate_cblocks [<cblock>|<cblock begin>-<cblock end>]* 276 invalidate_cblocks [<cblock>|<cblock begin>-<cblock end>]*
275 277
diff --git a/Documentation/devicetree/bindings/arc/pmu.txt b/Documentation/devicetree/bindings/arc/pmu.txt
new file mode 100644
index 000000000000..49d517340de3
--- /dev/null
+++ b/Documentation/devicetree/bindings/arc/pmu.txt
@@ -0,0 +1,24 @@
1* ARC Performance Monitor Unit
2
3The ARC 700 can be configured with a pipeline performance monitor for counting
4CPU and cache events like cache misses and hits.
5
6Note that:
7 * ARC 700 refers to a family of ARC processor cores;
8 - There is only one type of PMU available for the whole family;
9 - The PMU may support different sets of events; supported events are probed
10 at boot time, as required by the reference manual.
11
12 * The ARC 700 PMU does not support interrupts; although HW events may be
13 counted, the HW events themselves cannot serve as a trigger for a sample.
14
15Required properties:
16
17- compatible : should contain
18 "snps,arc700-pmu"
19
20Example:
21
22pmu {
23 compatible = "snps,arc700-pmu";
24};
diff --git a/Documentation/devicetree/bindings/arm/omap/mpu.txt b/Documentation/devicetree/bindings/arm/omap/mpu.txt
index 1a5a42ce21bb..83f405bde138 100644
--- a/Documentation/devicetree/bindings/arm/omap/mpu.txt
+++ b/Documentation/devicetree/bindings/arm/omap/mpu.txt
@@ -7,10 +7,18 @@ The MPU contain CPUs, GIC, L2 cache and a local PRCM.
7Required properties: 7Required properties:
8- compatible : Should be "ti,omap3-mpu" for OMAP3 8- compatible : Should be "ti,omap3-mpu" for OMAP3
9 Should be "ti,omap4-mpu" for OMAP4 9 Should be "ti,omap4-mpu" for OMAP4
10 Should be "ti,omap5-mpu" for OMAP5
10- ti,hwmods: "mpu" 11- ti,hwmods: "mpu"
11 12
12Examples: 13Examples:
13 14
15- For an OMAP5 SMP system:
16
17mpu {
18 compatible = "ti,omap5-mpu";
19 ti,hwmods = "mpu"
20};
21
14- For an OMAP4 SMP system: 22- For an OMAP4 SMP system:
15 23
16mpu { 24mpu {
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt
index 343781b9f246..3e1e498fea96 100644
--- a/Documentation/devicetree/bindings/arm/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/pmu.txt
@@ -7,6 +7,7 @@ representation in the device tree should be done as under:-
7Required properties: 7Required properties:
8 8
9- compatible : should be one of 9- compatible : should be one of
10 "arm,armv8-pmuv3"
10 "arm,cortex-a15-pmu" 11 "arm,cortex-a15-pmu"
11 "arm,cortex-a9-pmu" 12 "arm,cortex-a9-pmu"
12 "arm,cortex-a8-pmu" 13 "arm,cortex-a8-pmu"
diff --git a/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt b/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
index 47ada1dff216..5d49f2b37f68 100644
--- a/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
+++ b/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
@@ -49,7 +49,7 @@ adc@12D10000 {
49 /* NTC thermistor is a hwmon device */ 49 /* NTC thermistor is a hwmon device */
50 ncp15wb473@0 { 50 ncp15wb473@0 {
51 compatible = "ntc,ncp15wb473"; 51 compatible = "ntc,ncp15wb473";
52 pullup-uV = <1800000>; 52 pullup-uv = <1800000>;
53 pullup-ohm = <47000>; 53 pullup-ohm = <47000>;
54 pulldown-ohm = <0>; 54 pulldown-ohm = <0>;
55 io-channels = <&adc 4>; 55 io-channels = <&adc 4>;
diff --git a/Documentation/devicetree/bindings/clock/exynos4-clock.txt b/Documentation/devicetree/bindings/clock/exynos4-clock.txt
index c6bf8a6c8f52..a2ac2d9ac71a 100644
--- a/Documentation/devicetree/bindings/clock/exynos4-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos4-clock.txt
@@ -6,7 +6,7 @@ SoC's in the Exynos4 family.
6 6
7Required Properties: 7Required Properties:
8 8
9- comptible: should be one of the following. 9- compatible: should be one of the following.
10 - "samsung,exynos4210-clock" - controller compatible with Exynos4210 SoC. 10 - "samsung,exynos4210-clock" - controller compatible with Exynos4210 SoC.
11 - "samsung,exynos4412-clock" - controller compatible with Exynos4412 SoC. 11 - "samsung,exynos4412-clock" - controller compatible with Exynos4412 SoC.
12 12
diff --git a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
index 24765c146e31..46f5c791ea0d 100644
--- a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
@@ -5,7 +5,7 @@ controllers within the Exynos5250 SoC.
5 5
6Required Properties: 6Required Properties:
7 7
8- comptible: should be one of the following. 8- compatible: should be one of the following.
9 - "samsung,exynos5250-clock" - controller compatible with Exynos5250 SoC. 9 - "samsung,exynos5250-clock" - controller compatible with Exynos5250 SoC.
10 10
11- reg: physical base address of the controller and length of memory mapped 11- reg: physical base address of the controller and length of memory mapped
diff --git a/Documentation/devicetree/bindings/clock/exynos5420-clock.txt b/Documentation/devicetree/bindings/clock/exynos5420-clock.txt
index 32aa34ecad36..458f34789e5d 100644
--- a/Documentation/devicetree/bindings/clock/exynos5420-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5420-clock.txt
@@ -5,7 +5,7 @@ controllers within the Exynos5420 SoC.
5 5
6Required Properties: 6Required Properties:
7 7
8- comptible: should be one of the following. 8- compatible: should be one of the following.
9 - "samsung,exynos5420-clock" - controller compatible with Exynos5420 SoC. 9 - "samsung,exynos5420-clock" - controller compatible with Exynos5420 SoC.
10 10
11- reg: physical base address of the controller and length of memory mapped 11- reg: physical base address of the controller and length of memory mapped
diff --git a/Documentation/devicetree/bindings/clock/exynos5440-clock.txt b/Documentation/devicetree/bindings/clock/exynos5440-clock.txt
index 4499e9966bc9..9955dc9c7d96 100644
--- a/Documentation/devicetree/bindings/clock/exynos5440-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5440-clock.txt
@@ -5,7 +5,7 @@ controllers within the Exynos5440 SoC.
5 5
6Required Properties: 6Required Properties:
7 7
8- comptible: should be "samsung,exynos5440-clock". 8- compatible: should be "samsung,exynos5440-clock".
9 9
10- reg: physical base address of the controller and length of memory mapped 10- reg: physical base address of the controller and length of memory mapped
11 region. 11 region.
diff --git a/Documentation/devicetree/bindings/dma/atmel-dma.txt b/Documentation/devicetree/bindings/dma/atmel-dma.txt
index e1f343c7a34b..f69bcf5a6343 100644
--- a/Documentation/devicetree/bindings/dma/atmel-dma.txt
+++ b/Documentation/devicetree/bindings/dma/atmel-dma.txt
@@ -28,7 +28,7 @@ The three cells in order are:
28dependent: 28dependent:
29 - bit 7-0: peripheral identifier for the hardware handshaking interface. The 29 - bit 7-0: peripheral identifier for the hardware handshaking interface. The
30 identifier can be different for tx and rx. 30 identifier can be different for tx and rx.
31 - bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 1 for ASAP. 31 - bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 2 for ASAP.
32 32
33Example: 33Example:
34 34
diff --git a/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt b/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
index b0019eb5330e..798cfc9d3839 100644
--- a/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
@@ -5,16 +5,42 @@ This is for the non-QE/CPM/GUTs GPIO controllers as found on
5 5
6Every GPIO controller node must have #gpio-cells property defined, 6Every GPIO controller node must have #gpio-cells property defined,
7this information will be used to translate gpio-specifiers. 7this information will be used to translate gpio-specifiers.
8See bindings/gpio/gpio.txt for details of how to specify GPIO
9information for devices.
10
11The GPIO module usually is connected to the SoC's internal interrupt
12controller, see bindings/interrupt-controller/interrupts.txt (the
13interrupt client nodes section) for details how to specify this GPIO
14module's interrupt.
15
16The GPIO module may serve as another interrupt controller (cascaded to
17the SoC's internal interrupt controller). See the interrupt controller
18nodes section in bindings/interrupt-controller/interrupts.txt for
19details.
8 20
9Required properties: 21Required properties:
10- compatible : "fsl,<CHIP>-gpio" followed by "fsl,mpc8349-gpio" for 22- compatible: "fsl,<chip>-gpio" followed by "fsl,mpc8349-gpio"
11 83xx, "fsl,mpc8572-gpio" for 85xx and "fsl,mpc8610-gpio" for 86xx. 23 for 83xx, "fsl,mpc8572-gpio" for 85xx, or
12- #gpio-cells : Should be two. The first cell is the pin number and the 24 "fsl,mpc8610-gpio" for 86xx.
13 second cell is used to specify optional parameters (currently unused). 25- #gpio-cells: Should be two. The first cell is the pin number
14 - interrupts : Interrupt mapping for GPIO IRQ. 26 and the second cell is used to specify optional
15 - interrupt-parent : Phandle for the interrupt controller that 27 parameters (currently unused).
16 services interrupts for this device. 28- interrupt-parent: Phandle for the interrupt controller that
17- gpio-controller : Marks the port as GPIO controller. 29 services interrupts for this device.
30- interrupts: Interrupt mapping for GPIO IRQ.
31- gpio-controller: Marks the port as GPIO controller.
32
33Optional properties:
34- interrupt-controller: Empty boolean property which marks the GPIO
35 module as an IRQ controller.
36- #interrupt-cells: Should be two. Defines the number of integer
37 cells required to specify an interrupt within
38 this interrupt controller. The first cell
39 defines the pin number, the second cell
40 defines additional flags (trigger type,
41 trigger polarity). Note that the available
42 set of trigger conditions supported by the
43 GPIO module depends on the actual SoC.
18 44
19Example of gpio-controller nodes for a MPC8347 SoC: 45Example of gpio-controller nodes for a MPC8347 SoC:
20 46
@@ -22,39 +48,27 @@ Example of gpio-controller nodes for a MPC8347 SoC:
22 #gpio-cells = <2>; 48 #gpio-cells = <2>;
23 compatible = "fsl,mpc8347-gpio", "fsl,mpc8349-gpio"; 49 compatible = "fsl,mpc8347-gpio", "fsl,mpc8349-gpio";
24 reg = <0xc00 0x100>; 50 reg = <0xc00 0x100>;
25 interrupts = <74 0x8>;
26 interrupt-parent = <&ipic>; 51 interrupt-parent = <&ipic>;
52 interrupts = <74 0x8>;
27 gpio-controller; 53 gpio-controller;
54 interrupt-controller;
55 #interrupt-cells = <2>;
28 }; 56 };
29 57
30 gpio2: gpio-controller@d00 { 58 gpio2: gpio-controller@d00 {
31 #gpio-cells = <2>; 59 #gpio-cells = <2>;
32 compatible = "fsl,mpc8347-gpio", "fsl,mpc8349-gpio"; 60 compatible = "fsl,mpc8347-gpio", "fsl,mpc8349-gpio";
33 reg = <0xd00 0x100>; 61 reg = <0xd00 0x100>;
34 interrupts = <75 0x8>;
35 interrupt-parent = <&ipic>; 62 interrupt-parent = <&ipic>;
63 interrupts = <75 0x8>;
36 gpio-controller; 64 gpio-controller;
37 }; 65 };
38 66
39See booting-without-of.txt for details of how to specify GPIO 67Example of a peripheral using the GPIO module as an IRQ controller:
40information for devices.
41
42To use GPIO pins as interrupt sources for peripherals, specify the
43GPIO controller as the interrupt parent and define GPIO number +
44trigger mode using the interrupts property, which is defined like
45this:
46
47interrupts = <number trigger>, where:
48 - number: GPIO pin (0..31)
49 - trigger: trigger mode:
50 2 = trigger on falling edge
51 3 = trigger on both edges
52
53Example of device using this is:
54 68
55 funkyfpga@0 { 69 funkyfpga@0 {
56 compatible = "funky-fpga"; 70 compatible = "funky-fpga";
57 ... 71 ...
58 interrupts = <4 3>;
59 interrupt-parent = <&gpio1>; 72 interrupt-parent = <&gpio1>;
73 interrupts = <4 3>;
60 }; 74 };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-omap.txt b/Documentation/devicetree/bindings/i2c/i2c-omap.txt
index 56564aa4b444..7e49839d4124 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-omap.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-omap.txt
@@ -1,7 +1,8 @@
1I2C for OMAP platforms 1I2C for OMAP platforms
2 2
3Required properties : 3Required properties :
4- compatible : Must be "ti,omap3-i2c" or "ti,omap4-i2c" 4- compatible : Must be "ti,omap2420-i2c", "ti,omap2430-i2c", "ti,omap3-i2c"
5 or "ti,omap4-i2c"
5- ti,hwmods : Must be "i2c<n>", n being the instance number (1-based) 6- ti,hwmods : Must be "i2c<n>", n being the instance number (1-based)
6- #address-cells = <1>; 7- #address-cells = <1>;
7- #size-cells = <0>; 8- #size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index ad6a73852f08..b1cb3415e6f1 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -15,6 +15,7 @@ adi,adt7461 +/-1C TDM Extended Temp Range I.C
15adt7461 +/-1C TDM Extended Temp Range I.C 15adt7461 +/-1C TDM Extended Temp Range I.C
16at,24c08 i2c serial eeprom (24cxx) 16at,24c08 i2c serial eeprom (24cxx)
17atmel,24c02 i2c serial eeprom (24cxx) 17atmel,24c02 i2c serial eeprom (24cxx)
18atmel,at97sc3204t i2c trusted platform module (TPM)
18catalyst,24c32 i2c serial eeprom 19catalyst,24c32 i2c serial eeprom
19dallas,ds1307 64 x 8, Serial, I2C Real-Time Clock 20dallas,ds1307 64 x 8, Serial, I2C Real-Time Clock
20dallas,ds1338 I2C RTC with 56-Byte NV RAM 21dallas,ds1338 I2C RTC with 56-Byte NV RAM
@@ -35,6 +36,7 @@ fsl,mc13892 MC13892: Power Management Integrated Circuit (PMIC) for i.MX35/51
35fsl,mma8450 MMA8450Q: Xtrinsic Low-power, 3-axis Xtrinsic Accelerometer 36fsl,mma8450 MMA8450Q: Xtrinsic Low-power, 3-axis Xtrinsic Accelerometer
36fsl,mpr121 MPR121: Proximity Capacitive Touch Sensor Controller 37fsl,mpr121 MPR121: Proximity Capacitive Touch Sensor Controller
37fsl,sgtl5000 SGTL5000: Ultra Low-Power Audio Codec 38fsl,sgtl5000 SGTL5000: Ultra Low-Power Audio Codec
39gmt,g751 G751: Digital Temperature Sensor and Thermal Watchdog with Two-Wire Interface
38infineon,slb9635tt Infineon SLB9635 (Soft-) I2C TPM (old protocol, max 100khz) 40infineon,slb9635tt Infineon SLB9635 (Soft-) I2C TPM (old protocol, max 100khz)
39infineon,slb9645tt Infineon SLB9645 I2C TPM (new protocol, max 400khz) 41infineon,slb9645tt Infineon SLB9645 I2C TPM (new protocol, max 400khz)
40maxim,ds1050 5 Bit Programmable, Pulse-Width Modulator 42maxim,ds1050 5 Bit Programmable, Pulse-Width Modulator
@@ -44,6 +46,7 @@ mc,rv3029c2 Real Time Clock Module with I2C-Bus
44national,lm75 I2C TEMP SENSOR 46national,lm75 I2C TEMP SENSOR
45national,lm80 Serial Interface ACPI-Compatible Microprocessor System Hardware Monitor 47national,lm80 Serial Interface ACPI-Compatible Microprocessor System Hardware Monitor
46national,lm92 ±0.33°C Accurate, 12-Bit + Sign Temperature Sensor and Thermal Window Comparator with Two-Wire Interface 48national,lm92 ±0.33°C Accurate, 12-Bit + Sign Temperature Sensor and Thermal Window Comparator with Two-Wire Interface
49nuvoton,npct501 i2c trusted platform module (TPM)
47nxp,pca9556 Octal SMBus and I2C registered interface 50nxp,pca9556 Octal SMBus and I2C registered interface
48nxp,pca9557 8-bit I2C-bus and SMBus I/O port with reset 51nxp,pca9557 8-bit I2C-bus and SMBus I/O port with reset
49nxp,pcf8563 Real-time clock/calendar 52nxp,pcf8563 Real-time clock/calendar
@@ -61,3 +64,4 @@ taos,tsl2550 Ambient Light Sensor with SMBUS/Two Wire Serial Interface
61ti,tsc2003 I2C Touch-Screen Controller 64ti,tsc2003 I2C Touch-Screen Controller
62ti,tmp102 Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface 65ti,tmp102 Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface
63ti,tmp275 Digital Temperature Sensor 66ti,tmp275 Digital Temperature Sensor
67winbond,wpct301 i2c trusted platform module (TPM)
diff --git a/Documentation/devicetree/bindings/mmc/ti-omap.txt b/Documentation/devicetree/bindings/mmc/ti-omap.txt
new file mode 100644
index 000000000000..8de579969763
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/ti-omap.txt
@@ -0,0 +1,54 @@
1* TI MMC host controller for OMAP1 and 2420
2
3The MMC Host Controller on TI OMAP1 and 2420 family provides
4an interface for MMC, SD, and SDIO types of memory cards.
5
6This file documents differences between the core properties described
7by mmc.txt and the properties used by the omap mmc driver.
8
9Note that this driver will not work with omap2430 or later omaps,
10please see the omap hsmmc driver for the current omaps.
11
12Required properties:
13- compatible: Must be "ti,omap2420-mmc", for OMAP2420 controllers
14- ti,hwmods: For 2420, must be "msdi<n>", where n is controller
15 instance starting 1
16
17Examples:
18
19 msdi1: mmc@4809c000 {
20 compatible = "ti,omap2420-mmc";
21 ti,hwmods = "msdi1";
22 reg = <0x4809c000 0x80>;
23 interrupts = <83>;
24 dmas = <&sdma 61 &sdma 62>;
25 dma-names = "tx", "rx";
26 };
27
28* TI MMC host controller for OMAP1 and 2420
29
30The MMC Host Controller on TI OMAP1 and 2420 family provides
31an interface for MMC, SD, and SDIO types of memory cards.
32
33This file documents differences between the core properties described
34by mmc.txt and the properties used by the omap mmc driver.
35
36Note that this driver will not work with omap2430 or later omaps,
37please see the omap hsmmc driver for the current omaps.
38
39Required properties:
40- compatible: Must be "ti,omap2420-mmc", for OMAP2420 controllers
41- ti,hwmods: For 2420, must be "msdi<n>", where n is controller
42 instance starting 1
43
44Examples:
45
46 msdi1: mmc@4809c000 {
47 compatible = "ti,omap2420-mmc";
48 ti,hwmods = "msdi1";
49 reg = <0x4809c000 0x80>;
50 interrupts = <83>;
51 dmas = <&sdma 61 &sdma 62>;
52 dma-names = "tx", "rx";
53 };
54
diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt
index 48b259e29e87..bad381faf036 100644
--- a/Documentation/devicetree/bindings/net/davinci_emac.txt
+++ b/Documentation/devicetree/bindings/net/davinci_emac.txt
@@ -4,7 +4,7 @@ This file provides information, what the device node
4for the davinci_emac interface contains. 4for the davinci_emac interface contains.
5 5
6Required properties: 6Required properties:
7- compatible: "ti,davinci-dm6467-emac"; 7- compatible: "ti,davinci-dm6467-emac" or "ti,am3517-emac"
8- reg: Offset and length of the register set for the device 8- reg: Offset and length of the register set for the device
9- ti,davinci-ctrl-reg-offset: offset to control register 9- ti,davinci-ctrl-reg-offset: offset to control register
10- ti,davinci-ctrl-mod-reg-offset: offset to control module register 10- ti,davinci-ctrl-mod-reg-offset: offset to control module register
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index d53639221403..845ff848d895 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -15,6 +15,7 @@ Optional properties:
15 only if property "phy-reset-gpios" is available. Missing the property 15 only if property "phy-reset-gpios" is available. Missing the property
16 will have the duration be 1 millisecond. Numbers greater than 1000 are 16 will have the duration be 1 millisecond. Numbers greater than 1000 are
17 invalid and 1 millisecond will be used instead. 17 invalid and 1 millisecond will be used instead.
18- phy-supply: regulator that powers the Ethernet PHY.
18 19
19Example: 20Example:
20 21
@@ -25,4 +26,5 @@ ethernet@83fec000 {
25 phy-mode = "mii"; 26 phy-mode = "mii";
26 phy-reset-gpios = <&gpio2 14 0>; /* GPIO2_14 */ 27 phy-reset-gpios = <&gpio2 14 0>; /* GPIO2_14 */
27 local-mac-address = [00 04 9F 01 1B B9]; 28 local-mac-address = [00 04 9F 01 1B B9];
29 phy-supply = <&reg_fec_supply>;
28}; 30};
diff --git a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
index 953049b4248a..5a41a8658daa 100644
--- a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
+++ b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
@@ -8,3 +8,7 @@ Required properties:
8Optional properties: 8Optional properties:
9- phy-device : phandle to Ethernet phy 9- phy-device : phandle to Ethernet phy
10- local-mac-address : Ethernet mac address to use 10- local-mac-address : Ethernet mac address to use
11- reg-io-width : Mask of sizes (in bytes) of the IO accesses that
12 are supported on the device. Valid value for SMSC LAN91c111 are
13 1, 2 or 4. If it's omitted or invalid, the size would be 2 meaning
14 16-bit access only.
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/dma.txt b/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
index 2a4b4bce6110..7fc1b010fa75 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
@@ -1,33 +1,30 @@
1* Freescale 83xx DMA Controller 1* Freescale DMA Controllers
2 2
3Freescale PowerPC 83xx have on chip general purpose DMA controllers. 3** Freescale Elo DMA Controller
4 This is a little-endian 4-channel DMA controller, used in Freescale mpc83xx
5 series chips such as mpc8315, mpc8349, mpc8379 etc.
4 6
5Required properties: 7Required properties:
6 8
7- compatible : compatible list, contains 2 entries, first is 9- compatible : must include "fsl,elo-dma"
8 "fsl,CHIP-dma", where CHIP is the processor 10- reg : DMA General Status Register, i.e. DGSR which contains
9 (mpc8349, mpc8360, etc.) and the second is 11 status for all the 4 DMA channels
10 "fsl,elo-dma" 12- ranges : describes the mapping between the address space of the
11- reg : <registers mapping for DMA general status reg> 13 DMA channels and the address space of the DMA controller
12- ranges : Should be defined as specified in 1) to describe the
13 DMA controller channels.
14- cell-index : controller index. 0 for controller @ 0x8100 14- cell-index : controller index. 0 for controller @ 0x8100
15- interrupts : <interrupt mapping for DMA IRQ> 15- interrupts : interrupt specifier for DMA IRQ
16- interrupt-parent : optional, if needed for interrupt mapping 16- interrupt-parent : optional, if needed for interrupt mapping
17 17
18
19- DMA channel nodes: 18- DMA channel nodes:
20 - compatible : compatible list, contains 2 entries, first is 19 - compatible : must include "fsl,elo-dma-channel"
21 "fsl,CHIP-dma-channel", where CHIP is the processor 20 However, see note below.
22 (mpc8349, mpc8350, etc.) and the second is 21 - reg : DMA channel specific registers
23 "fsl,elo-dma-channel". However, see note below. 22 - cell-index : DMA channel index starts at 0.
24 - reg : <registers mapping for channel>
25 - cell-index : dma channel index starts at 0.
26 23
27Optional properties: 24Optional properties:
28 - interrupts : <interrupt mapping for DMA channel IRQ> 25 - interrupts : interrupt specifier for DMA channel IRQ
29 (on 83xx this is expected to be identical to 26 (on 83xx this is expected to be identical to
30 the interrupts property of the parent node) 27 the interrupts property of the parent node)
31 - interrupt-parent : optional, if needed for interrupt mapping 28 - interrupt-parent : optional, if needed for interrupt mapping
32 29
33Example: 30Example:
@@ -70,30 +67,27 @@ Example:
70 }; 67 };
71 }; 68 };
72 69
73* Freescale 85xx/86xx DMA Controller 70** Freescale EloPlus DMA Controller
74 71 This is a 4-channel DMA controller with extended addresses and chaining,
75Freescale PowerPC 85xx/86xx have on chip general purpose DMA controllers. 72 mainly used in Freescale mpc85xx/86xx, Pxxx and BSC series chips, such as
73 mpc8540, mpc8641, p4080, bsc9131 etc.
76 74
77Required properties: 75Required properties:
78 76
79- compatible : compatible list, contains 2 entries, first is 77- compatible : must include "fsl,eloplus-dma"
80 "fsl,CHIP-dma", where CHIP is the processor 78- reg : DMA General Status Register, i.e. DGSR which contains
81 (mpc8540, mpc8540, etc.) and the second is 79 status for all the 4 DMA channels
82 "fsl,eloplus-dma"
83- reg : <registers mapping for DMA general status reg>
84- cell-index : controller index. 0 for controller @ 0x21000, 80- cell-index : controller index. 0 for controller @ 0x21000,
85 1 for controller @ 0xc000 81 1 for controller @ 0xc000
86- ranges : Should be defined as specified in 1) to describe the 82- ranges : describes the mapping between the address space of the
87 DMA controller channels. 83 DMA channels and the address space of the DMA controller
88 84
89- DMA channel nodes: 85- DMA channel nodes:
90 - compatible : compatible list, contains 2 entries, first is 86 - compatible : must include "fsl,eloplus-dma-channel"
91 "fsl,CHIP-dma-channel", where CHIP is the processor 87 However, see note below.
92 (mpc8540, mpc8560, etc.) and the second is 88 - cell-index : DMA channel index starts at 0.
93 "fsl,eloplus-dma-channel". However, see note below. 89 - reg : DMA channel specific registers
94 - cell-index : dma channel index starts at 0. 90 - interrupts : interrupt specifier for DMA channel IRQ
95 - reg : <registers mapping for channel>
96 - interrupts : <interrupt mapping for DMA channel IRQ>
97 - interrupt-parent : optional, if needed for interrupt mapping 91 - interrupt-parent : optional, if needed for interrupt mapping
98 92
99Example: 93Example:
@@ -134,6 +128,76 @@ Example:
134 }; 128 };
135 }; 129 };
136 130
131** Freescale Elo3 DMA Controller
132 DMA controller which has the same function as EloPlus, except that Elo3 has 8
133 channels while EloPlus has only 4, it is used in Freescale Txxx and Bxxx
134 series chips, such as t1040, t4240, b4860.
135
136Required properties:
137
138- compatible : must include "fsl,elo3-dma"
139- reg : contains two entries for DMA General Status Registers,
140 i.e. DGSR0 which includes status for channel 1~4, and
141 DGSR1 for channel 5~8
142- ranges : describes the mapping between the address space of the
143 DMA channels and the address space of the DMA controller
144
145- DMA channel nodes:
146 - compatible : must include "fsl,eloplus-dma-channel"
147 - reg : DMA channel specific registers
148 - interrupts : interrupt specifier for DMA channel IRQ
149 - interrupt-parent : optional, if needed for interrupt mapping
150
151Example:
152dma@100300 {
153 #address-cells = <1>;
154 #size-cells = <1>;
155 compatible = "fsl,elo3-dma";
156 reg = <0x100300 0x4>,
157 <0x100600 0x4>;
158 ranges = <0x0 0x100100 0x500>;
159 dma-channel@0 {
160 compatible = "fsl,eloplus-dma-channel";
161 reg = <0x0 0x80>;
162 interrupts = <28 2 0 0>;
163 };
164 dma-channel@80 {
165 compatible = "fsl,eloplus-dma-channel";
166 reg = <0x80 0x80>;
167 interrupts = <29 2 0 0>;
168 };
169 dma-channel@100 {
170 compatible = "fsl,eloplus-dma-channel";
171 reg = <0x100 0x80>;
172 interrupts = <30 2 0 0>;
173 };
174 dma-channel@180 {
175 compatible = "fsl,eloplus-dma-channel";
176 reg = <0x180 0x80>;
177 interrupts = <31 2 0 0>;
178 };
179 dma-channel@300 {
180 compatible = "fsl,eloplus-dma-channel";
181 reg = <0x300 0x80>;
182 interrupts = <76 2 0 0>;
183 };
184 dma-channel@380 {
185 compatible = "fsl,eloplus-dma-channel";
186 reg = <0x380 0x80>;
187 interrupts = <77 2 0 0>;
188 };
189 dma-channel@400 {
190 compatible = "fsl,eloplus-dma-channel";
191 reg = <0x400 0x80>;
192 interrupts = <78 2 0 0>;
193 };
194 dma-channel@480 {
195 compatible = "fsl,eloplus-dma-channel";
196 reg = <0x480 0x80>;
197 interrupts = <79 2 0 0>;
198 };
199};
200
137Note on DMA channel compatible properties: The compatible property must say 201Note on DMA channel compatible properties: The compatible property must say
138"fsl,elo-dma-channel" or "fsl,eloplus-dma-channel" to be used by the Elo DMA 202"fsl,elo-dma-channel" or "fsl,eloplus-dma-channel" to be used by the Elo DMA
139driver (fsldma). Any DMA channel used by fsldma cannot be used by another 203driver (fsldma). Any DMA channel used by fsldma cannot be used by another
diff --git a/Documentation/devicetree/bindings/rng/qcom,prng.txt b/Documentation/devicetree/bindings/rng/qcom,prng.txt
new file mode 100644
index 000000000000..8e5853c2879b
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/qcom,prng.txt
@@ -0,0 +1,17 @@
1Qualcomm MSM pseudo random number generator.
2
3Required properties:
4
5- compatible : should be "qcom,prng"
6- reg : specifies base physical address and size of the registers map
7- clocks : phandle to clock-controller plus clock-specifier pair
8- clock-names : "core" clocks all registers, FIFO and circuits in PRNG IP block
9
10Example:
11
12 rng@f9bff000 {
13 compatible = "qcom,prng";
14 reg = <0xf9bff000 0x200>;
15 clocks = <&clock GCC_PRNG_AHB_CLK>;
16 clock-names = "core";
17 };
diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra20-spi.txt b/Documentation/devicetree/bindings/spi/nvidia,tegra20-spi.txt
deleted file mode 100644
index 6b9e51896693..000000000000
--- a/Documentation/devicetree/bindings/spi/nvidia,tegra20-spi.txt
+++ /dev/null
@@ -1,5 +0,0 @@
1NVIDIA Tegra 2 SPI device
2
3Required properties:
4- compatible : should be "nvidia,tegra20-spi".
5- gpios : should specify GPIOs used for chipselect.
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index ce95ed1c6d3e..edbb8d88c85e 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -32,12 +32,14 @@ est ESTeem Wireless Modems
32fsl Freescale Semiconductor 32fsl Freescale Semiconductor
33GEFanuc GE Fanuc Intelligent Platforms Embedded Systems, Inc. 33GEFanuc GE Fanuc Intelligent Platforms Embedded Systems, Inc.
34gef GE Fanuc Intelligent Platforms Embedded Systems, Inc. 34gef GE Fanuc Intelligent Platforms Embedded Systems, Inc.
35gmt Global Mixed-mode Technology, Inc.
35hisilicon Hisilicon Limited. 36hisilicon Hisilicon Limited.
36hp Hewlett Packard 37hp Hewlett Packard
37ibm International Business Machines (IBM) 38ibm International Business Machines (IBM)
38idt Integrated Device Technologies, Inc. 39idt Integrated Device Technologies, Inc.
39img Imagination Technologies Ltd. 40img Imagination Technologies Ltd.
40intercontrol Inter Control Group 41intercontrol Inter Control Group
42lg LG Corporation
41linux Linux-specific binding 43linux Linux-specific binding
42lsi LSI Corp. (LSI Logic) 44lsi LSI Corp. (LSI Logic)
43marvell Marvell Technology Group Ltd. 45marvell Marvell Technology Group Ltd.
diff --git a/Documentation/dmatest.txt b/Documentation/dmatest.txt
index a2b5663eae26..dd77a81bdb80 100644
--- a/Documentation/dmatest.txt
+++ b/Documentation/dmatest.txt
@@ -15,39 +15,48 @@ be built as module or inside kernel. Let's consider those cases.
15 15
16 Part 2 - When dmatest is built as a module... 16 Part 2 - When dmatest is built as a module...
17 17
18After mounting debugfs and loading the module, the /sys/kernel/debug/dmatest
19folder with nodes will be created. There are two important files located. First
20is the 'run' node that controls run and stop phases of the test, and the second
21one, 'results', is used to get the test case results.
22
23Note that in this case test will not run on load automatically.
24
25Example of usage: 18Example of usage:
19 % modprobe dmatest channel=dma0chan0 timeout=2000 iterations=1 run=1
20
21...or:
22 % modprobe dmatest
26 % echo dma0chan0 > /sys/module/dmatest/parameters/channel 23 % echo dma0chan0 > /sys/module/dmatest/parameters/channel
27 % echo 2000 > /sys/module/dmatest/parameters/timeout 24 % echo 2000 > /sys/module/dmatest/parameters/timeout
28 % echo 1 > /sys/module/dmatest/parameters/iterations 25 % echo 1 > /sys/module/dmatest/parameters/iterations
29 % echo 1 > /sys/kernel/debug/dmatest/run 26 % echo 1 > /sys/module/dmatest/parameters/run
27
28...or on the kernel command line:
29
30 dmatest.channel=dma0chan0 dmatest.timeout=2000 dmatest.iterations=1 dmatest.run=1
30 31
31Hint: available channel list could be extracted by running the following 32Hint: available channel list could be extracted by running the following
32command: 33command:
33 % ls -1 /sys/class/dma/ 34 % ls -1 /sys/class/dma/
34 35
35After a while you will start to get messages about current status or error like 36Once started a message like "dmatest: Started 1 threads using dma0chan0" is
36in the original code. 37emitted. After that only test failure messages are reported until the test
38stops.
37 39
38Note that running a new test will not stop any in progress test. 40Note that running a new test will not stop any in progress test.
39 41
40The following command should return actual state of the test. 42The following command returns the state of the test.
41 % cat /sys/kernel/debug/dmatest/run 43 % cat /sys/module/dmatest/parameters/run
42 44
43To wait for test done the user may perform a busy loop that checks the state. 45To wait for test completion userspace can poll 'run' until it is false, or use
44 46the wait parameter. Specifying 'wait=1' when loading the module causes module
45 % while [ $(cat /sys/kernel/debug/dmatest/run) = "Y" ] 47initialization to pause until a test run has completed, while reading
46 > do 48/sys/module/dmatest/parameters/wait waits for any running test to complete
47 > echo -n "." 49before returning. For example, the following scripts wait for 42 tests
48 > sleep 1 50to complete before exiting. Note that if 'iterations' is set to 'infinite' then
49 > done 51waiting is disabled.
50 > echo 52
53Example:
54 % modprobe dmatest run=1 iterations=42 wait=1
55 % modprobe -r dmatest
56...or:
57 % modprobe dmatest run=1 iterations=42
58 % cat /sys/module/dmatest/parameters/wait
59 % modprobe -r dmatest
51 60
52 Part 3 - When built-in in the kernel... 61 Part 3 - When built-in in the kernel...
53 62
@@ -62,21 +71,22 @@ case. You always could check them at run-time by running
62 71
63 Part 4 - Gathering the test results 72 Part 4 - Gathering the test results
64 73
65The module provides a storage for the test results in the memory. The gathered 74Test results are printed to the kernel log buffer with the format:
66data could be used after test is done.
67 75
68The special file 'results' in the debugfs represents gathered data of the in 76"dmatest: result <channel>: <test id>: '<error msg>' with src_off=<val> dst_off=<val> len=<val> (<err code>)"
69progress test. The messages collected are printed to the kernel log as well.
70 77
71Example of output: 78Example of output:
72 % cat /sys/kernel/debug/dmatest/results 79 % dmesg | tail -n 1
73 dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0) 80 dmatest: result dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0)
74 81
75The message format is unified across the different types of errors. A number in 82The message format is unified across the different types of errors. A number in
76the parens represents additional information, e.g. error code, error counter, 83the parens represents additional information, e.g. error code, error counter,
77or status. 84or status. A test thread also emits a summary line at completion listing the
85number of tests executed, number that failed, and a result code.
78 86
79Comparison between buffers is stored to the dedicated structure. 87Example:
88 % dmesg | tail -n 1
89 dmatest: dma0chan0-copy0: summary 1 test, 0 failures 1000 iops 100000 KB/s (0)
80 90
81Note that the verify result is now accessible only via file 'results' in the 91The details of a data miscompare error are also emitted, but do not follow the
82debugfs. 92above format.
diff --git a/Documentation/filesystems/btrfs.txt b/Documentation/filesystems/btrfs.txt
index 9dae59407437..5dd282dda55c 100644
--- a/Documentation/filesystems/btrfs.txt
+++ b/Documentation/filesystems/btrfs.txt
@@ -70,6 +70,12 @@ Unless otherwise specified, all options default to off.
70 70
71 See comments at the top of fs/btrfs/check-integrity.c for more info. 71 See comments at the top of fs/btrfs/check-integrity.c for more info.
72 72
73 commit=<seconds>
74 Set the interval of periodic commit, 30 seconds by default. Higher
75 values defer data being synced to permanent storage with obvious
76 consequences when the system crashes. The upper bound is not forced,
77 but a warning is printed if it's more than 300 seconds (5 minutes).
78
73 compress 79 compress
74 compress=<type> 80 compress=<type>
75 compress-force 81 compress-force
@@ -154,7 +160,11 @@ Unless otherwise specified, all options default to off.
154 Currently this scans a list of several previous tree roots and tries to 160 Currently this scans a list of several previous tree roots and tries to
155 use the first readable. 161 use the first readable.
156 162
157 skip_balance 163 rescan_uuid_tree
164 Force check and rebuild procedure of the UUID tree. This should not
165 normally be needed.
166
167 skip_balance
158 Skip automatic resume of interrupted balance operation after mount. 168 Skip automatic resume of interrupted balance operation after mount.
159 May be resumed with "btrfs balance resume." 169 May be resumed with "btrfs balance resume."
160 170
@@ -234,24 +244,14 @@ available from the git repository at the following location:
234 244
235These include the following tools: 245These include the following tools:
236 246
237mkfs.btrfs: create a filesystem 247* mkfs.btrfs: create a filesystem
238
239btrfsctl: control program to create snapshots and subvolumes:
240 248
241 mount /dev/sda2 /mnt 249* btrfs: a single tool to manage the filesystems, refer to the manpage for more details
242 btrfsctl -s new_subvol_name /mnt
243 btrfsctl -s snapshot_of_default /mnt/default
244 btrfsctl -s snapshot_of_new_subvol /mnt/new_subvol_name
245 btrfsctl -s snapshot_of_a_snapshot /mnt/snapshot_of_new_subvol
246 ls /mnt
247 default snapshot_of_a_snapshot snapshot_of_new_subvol
248 new_subvol_name snapshot_of_default
249 250
250 Snapshots and subvolumes cannot be deleted right now, but you can 251* 'btrfsck' or 'btrfs check': do a consistency check of the filesystem
251 rm -rf all the files and directories inside them.
252 252
253btrfsck: do a limited check of the FS extent trees. 253Other tools for specific tasks:
254 254
255btrfs-debug-tree: print all of the FS metadata in text form. Example: 255* btrfs-convert: in-place conversion from ext2/3/4 filesystems
256 256
257 btrfs-debug-tree /dev/sda2 >& big_output_file 257* btrfs-image: dump filesystem metadata for debugging
diff --git a/Documentation/gpio/00-INDEX b/Documentation/gpio/00-INDEX
new file mode 100644
index 000000000000..1de43ae46ae6
--- /dev/null
+++ b/Documentation/gpio/00-INDEX
@@ -0,0 +1,14 @@
100-INDEX
2 - This file
3gpio.txt
4 - Introduction to GPIOs and their kernel interfaces
5consumer.txt
6 - How to obtain and use GPIOs in a driver
7driver.txt
8 - How to write a GPIO driver
9board.txt
10 - How to assign GPIOs to a consumer device and a function
11sysfs.txt
12 - Information about the GPIO sysfs interface
13gpio-legacy.txt
14 - Historical documentation of the deprecated GPIO integer interface
diff --git a/Documentation/gpio/board.txt b/Documentation/gpio/board.txt
new file mode 100644
index 000000000000..0d03506f2cc5
--- /dev/null
+++ b/Documentation/gpio/board.txt
@@ -0,0 +1,115 @@
1GPIO Mappings
2=============
3
4This document explains how GPIOs can be assigned to given devices and functions.
5Note that it only applies to the new descriptor-based interface. For a
6description of the deprecated integer-based GPIO interface please refer to
7gpio-legacy.txt (actually, there is no real mapping possible with the old
8interface; you just fetch an integer from somewhere and request the
9corresponding GPIO).
10
11Platforms that make use of GPIOs must select ARCH_REQUIRE_GPIOLIB (if GPIO usage
12is mandatory) or ARCH_WANT_OPTIONAL_GPIOLIB (if GPIO support can be omitted) in
13their Kconfig. Then, how GPIOs are mapped depends on what the platform uses to
14describe its hardware layout. Currently, mappings can be defined through device
15tree, ACPI, and platform data.
16
17Device Tree
18-----------
19GPIOs can easily be mapped to devices and functions in the device tree. The
20exact way to do it depends on the GPIO controller providing the GPIOs, see the
21device tree bindings for your controller.
22
23GPIO mappings are defined in the consumer device's node, in a property named
24<function>-gpios, where <function> is the function the driver will request
25through gpiod_get(). For example:
26
27 foo_device {
28 compatible = "acme,foo";
29 ...
30 led-gpios = <&gpio 15 GPIO_ACTIVE_HIGH>, /* red */
31 <&gpio 16 GPIO_ACTIVE_HIGH>, /* green */
32 <&gpio 17 GPIO_ACTIVE_HIGH>; /* blue */
33
34 power-gpio = <&gpio 1 GPIO_ACTIVE_LOW>;
35 };
36
37This property will make GPIOs 15, 16 and 17 available to the driver under the
38"led" function, and GPIO 1 as the "power" GPIO:
39
40 struct gpio_desc *red, *green, *blue, *power;
41
42 red = gpiod_get_index(dev, "led", 0);
43 green = gpiod_get_index(dev, "led", 1);
44 blue = gpiod_get_index(dev, "led", 2);
45
46 power = gpiod_get(dev, "power");
47
48The led GPIOs will be active-high, while the power GPIO will be active-low (i.e.
49gpiod_is_active_low(power) will be true).
50
51ACPI
52----
53ACPI does not support function names for GPIOs. Therefore, only the "idx"
54argument of gpiod_get_index() is useful to discriminate between GPIOs assigned
55to a device. The "con_id" argument can still be set for debugging purposes (it
56will appear under error messages as well as debug and sysfs nodes).
57
58Platform Data
59-------------
60Finally, GPIOs can be bound to devices and functions using platform data. Board
61files that desire to do so need to include the following header:
62
63 #include <linux/gpio/driver.h>
64
65GPIOs are mapped by the means of tables of lookups, containing instances of the
66gpiod_lookup structure. Two macros are defined to help declaring such mappings:
67
68 GPIO_LOOKUP(chip_label, chip_hwnum, dev_id, con_id, flags)
69 GPIO_LOOKUP_IDX(chip_label, chip_hwnum, dev_id, con_id, idx, flags)
70
71where
72
73 - chip_label is the label of the gpiod_chip instance providing the GPIO
74 - chip_hwnum is the hardware number of the GPIO within the chip
75 - dev_id is the identifier of the device that will make use of this GPIO. If
76 NULL, the GPIO will be available to all devices.
77 - con_id is the name of the GPIO function from the device point of view. It
78 can be NULL.
79 - idx is the index of the GPIO within the function.
80 - flags is defined to specify the following properties:
81 * GPIOF_ACTIVE_LOW - to configure the GPIO as active-low
82 * GPIOF_OPEN_DRAIN - GPIO pin is open drain type.
83 * GPIOF_OPEN_SOURCE - GPIO pin is open source type.
84
85In the future, these flags might be extended to support more properties.
86
87Note that GPIO_LOOKUP() is just a shortcut to GPIO_LOOKUP_IDX() where idx = 0.
88
89A lookup table can then be defined as follows:
90
91 struct gpiod_lookup gpios_table[] = {
92 GPIO_LOOKUP_IDX("gpio.0", 15, "foo.0", "led", 0, GPIO_ACTIVE_HIGH),
93 GPIO_LOOKUP_IDX("gpio.0", 16, "foo.0", "led", 1, GPIO_ACTIVE_HIGH),
94 GPIO_LOOKUP_IDX("gpio.0", 17, "foo.0", "led", 2, GPIO_ACTIVE_HIGH),
95 GPIO_LOOKUP("gpio.0", 1, "foo.0", "power", GPIO_ACTIVE_LOW),
96 };
97
98And the table can be added by the board code as follows:
99
100 gpiod_add_table(gpios_table, ARRAY_SIZE(gpios_table));
101
102The driver controlling "foo.0" will then be able to obtain its GPIOs as follows:
103
104 struct gpio_desc *red, *green, *blue, *power;
105
106 red = gpiod_get_index(dev, "led", 0);
107 green = gpiod_get_index(dev, "led", 1);
108 blue = gpiod_get_index(dev, "led", 2);
109
110 power = gpiod_get(dev, "power");
111 gpiod_direction_output(power, 1);
112
113Since the "power" GPIO is mapped as active-low, its actual signal will be 0
114after this code. Contrary to the legacy integer GPIO interface, the active-low
115property is handled during mapping and is thus transparent to GPIO consumers.
diff --git a/Documentation/gpio/consumer.txt b/Documentation/gpio/consumer.txt
new file mode 100644
index 000000000000..07c74a3765a0
--- /dev/null
+++ b/Documentation/gpio/consumer.txt
@@ -0,0 +1,197 @@
1GPIO Descriptor Consumer Interface
2==================================
3
4This document describes the consumer interface of the GPIO framework. Note that
5it describes the new descriptor-based interface. For a description of the
6deprecated integer-based GPIO interface please refer to gpio-legacy.txt.
7
8
9Guidelines for GPIO consumers
10==============================
11
12Drivers that can't work without standard GPIO calls should have Kconfig entries
13that depend on GPIOLIB. The functions that allow a driver to obtain and use
14GPIOs are available by including the following file:
15
16 #include <linux/gpio/consumer.h>
17
18All the functions that work with the descriptor-based GPIO interface are
19prefixed with gpiod_. The gpio_ prefix is used for the legacy interface. No
20other function in the kernel should use these prefixes.
21
22
23Obtaining and Disposing GPIOs
24=============================
25
26With the descriptor-based interface, GPIOs are identified with an opaque,
27non-forgeable handler that must be obtained through a call to one of the
28gpiod_get() functions. Like many other kernel subsystems, gpiod_get() takes the
29device that will use the GPIO and the function the requested GPIO is supposed to
30fulfill:
31
32 struct gpio_desc *gpiod_get(struct device *dev, const char *con_id)
33
34If a function is implemented by using several GPIOs together (e.g. a simple LED
35device that displays digits), an additional index argument can be specified:
36
37 struct gpio_desc *gpiod_get_index(struct device *dev,
38 const char *con_id, unsigned int idx)
39
40Both functions return either a valid GPIO descriptor, or an error code checkable
41with IS_ERR(). They will never return a NULL pointer.
42
43Device-managed variants of these functions are also defined:
44
45 struct gpio_desc *devm_gpiod_get(struct device *dev, const char *con_id)
46
47 struct gpio_desc *devm_gpiod_get_index(struct device *dev,
48 const char *con_id,
49 unsigned int idx)
50
51A GPIO descriptor can be disposed of using the gpiod_put() function:
52
53 void gpiod_put(struct gpio_desc *desc)
54
55It is strictly forbidden to use a descriptor after calling this function. The
56device-managed variant is, unsurprisingly:
57
58 void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
59
60
61Using GPIOs
62===========
63
64Setting Direction
65-----------------
66The first thing a driver must do with a GPIO is setting its direction. This is
67done by invoking one of the gpiod_direction_*() functions:
68
69 int gpiod_direction_input(struct gpio_desc *desc)
70 int gpiod_direction_output(struct gpio_desc *desc, int value)
71
72The return value is zero for success, else a negative errno. It should be
73checked, since the get/set calls don't return errors and since misconfiguration
74is possible. You should normally issue these calls from a task context. However,
75for spinlock-safe GPIOs it is OK to use them before tasking is enabled, as part
76of early board setup.
77
78For output GPIOs, the value provided becomes the initial output value. This
79helps avoid signal glitching during system startup.
80
81A driver can also query the current direction of a GPIO:
82
83 int gpiod_get_direction(const struct gpio_desc *desc)
84
85This function will return either GPIOF_DIR_IN or GPIOF_DIR_OUT.
86
87Be aware that there is no default direction for GPIOs. Therefore, **using a GPIO
88without setting its direction first is illegal and will result in undefined
89behavior!**
90
91
92Spinlock-Safe GPIO Access
93-------------------------
94Most GPIO controllers can be accessed with memory read/write instructions. Those
95don't need to sleep, and can safely be done from inside hard (non-threaded) IRQ
96handlers and similar contexts.
97
98Use the following calls to access GPIOs from an atomic context:
99
100 int gpiod_get_value(const struct gpio_desc *desc);
101 void gpiod_set_value(struct gpio_desc *desc, int value);
102
103The values are boolean, zero for low, nonzero for high. When reading the value
104of an output pin, the value returned should be what's seen on the pin. That
105won't always match the specified output value, because of issues including
106open-drain signaling and output latencies.
107
108The get/set calls do not return errors because "invalid GPIO" should have been
109reported earlier from gpiod_direction_*(). However, note that not all platforms
110can read the value of output pins; those that can't should always return zero.
111Also, using these calls for GPIOs that can't safely be accessed without sleeping
112(see below) is an error.
113
114
115GPIO Access That May Sleep
116--------------------------
117Some GPIO controllers must be accessed using message based buses like I2C or
118SPI. Commands to read or write those GPIO values require waiting to get to the
119head of a queue to transmit a command and get its response. This requires
120sleeping, which can't be done from inside IRQ handlers.
121
122Platforms that support this type of GPIO distinguish them from other GPIOs by
123returning nonzero from this call:
124
125 int gpiod_cansleep(const struct gpio_desc *desc)
126
127To access such GPIOs, a different set of accessors is defined:
128
129 int gpiod_get_value_cansleep(const struct gpio_desc *desc)
130 void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
131
132Accessing such GPIOs requires a context which may sleep, for example a threaded
133IRQ handler, and those accessors must be used instead of spinlock-safe
134accessors without the cansleep() name suffix.
135
136Other than the fact that these accessors might sleep, and will work on GPIOs
137that can't be accessed from hardIRQ handlers, these calls act the same as the
138spinlock-safe calls.
139
140
141Active-low State and Raw GPIO Values
142------------------------------------
143Device drivers like to manage the logical state of a GPIO, i.e. the value their
144device will actually receive, no matter what lies between it and the GPIO line.
145In some cases, it might make sense to control the actual GPIO line value. The
146following set of calls ignore the active-low property of a GPIO and work on the
147raw line value:
148
149 int gpiod_get_raw_value(const struct gpio_desc *desc)
150 void gpiod_set_raw_value(struct gpio_desc *desc, int value)
151 int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
152 void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value)
153
154The active-low state of a GPIO can also be queried using the following call:
155
156 int gpiod_is_active_low(const struct gpio_desc *desc)
157
158Note that these functions should only be used with great moderation; a driver
159should not have to care about the physical line level.
160
161GPIOs mapped to IRQs
162--------------------
163GPIO lines can quite often be used as IRQs. You can get the IRQ number
164corresponding to a given GPIO using the following call:
165
166 int gpiod_to_irq(const struct gpio_desc *desc)
167
168It will return an IRQ number, or a negative errno code if the mapping can't be
169done (most likely because that particular GPIO cannot be used as IRQ). It is an
170unchecked error to use a GPIO that wasn't set up as an input using
171gpiod_direction_input(), or to use an IRQ number that didn't originally come
172from gpiod_to_irq(). gpiod_to_irq() is not allowed to sleep.
173
174Non-error values returned from gpiod_to_irq() can be passed to request_irq() or
175free_irq(). They will often be stored into IRQ resources for platform devices,
176by the board-specific initialization code. Note that IRQ trigger options are
177part of the IRQ interface, e.g. IRQF_TRIGGER_FALLING, as are system wakeup
178capabilities.
179
180
181Interacting With the Legacy GPIO Subsystem
182==========================================
183Many kernel subsystems still handle GPIOs using the legacy integer-based
184interface. Although it is strongly encouraged to upgrade them to the safer
185descriptor-based API, the following two functions allow you to convert a GPIO
186descriptor into the GPIO integer namespace and vice-versa:
187
188 int desc_to_gpio(const struct gpio_desc *desc)
189 struct gpio_desc *gpio_to_desc(unsigned gpio)
190
191The GPIO number returned by desc_to_gpio() can be safely used as long as the
192GPIO descriptor has not been freed. All the same, a GPIO number passed to
193gpio_to_desc() must have been properly acquired, and usage of the returned GPIO
194descriptor is only possible after the GPIO number has been released.
195
196Freeing a GPIO obtained by one API with the other API is forbidden and an
197unchecked error.
diff --git a/Documentation/gpio/driver.txt b/Documentation/gpio/driver.txt
new file mode 100644
index 000000000000..9da0bfa74781
--- /dev/null
+++ b/Documentation/gpio/driver.txt
@@ -0,0 +1,75 @@
1GPIO Descriptor Driver Interface
2================================
3
4This document serves as a guide for GPIO chip driver writers. Note that it
5describes the new descriptor-based interface. For a description of the
6deprecated integer-based GPIO interface please refer to gpio-legacy.txt.
7
8Each GPIO controller driver needs to include the following header, which defines
9the structures used to define a GPIO driver:
10
11 #include <linux/gpio/driver.h>
12
13
14Internal Representation of GPIOs
15================================
16
17Inside a GPIO driver, individual GPIOs are identified by their hardware number,
18which is a unique number between 0 and n, n being the number of GPIOs managed by
19the chip. This number is purely internal: the hardware number of a particular
20GPIO descriptor is never made visible outside of the driver.
21
22On top of this internal number, each GPIO also needs to have a global number in
23the integer GPIO namespace so that it can be used with the legacy GPIO
24interface. Each chip must thus have a "base" number (which can be automatically
25assigned), and for each GPIO the global number will be (base + hardware number).
26Although the integer representation is considered deprecated, it still has many
27users and thus needs to be maintained.
28
29So for example one platform could use numbers 32-159 for GPIOs, with a
30controller defining 128 GPIOs at a "base" of 32; while another platform uses
31numbers 0..63 with one set of GPIO controllers, 64-79 with another type of GPIO
32controller, and on one particular board 80-95 with an FPGA. The numbers need not
33be contiguous; either of those platforms could also use numbers 2000-2063 to
34identify GPIOs in a bank of I2C GPIO expanders.
35
36
37Controller Drivers: gpio_chip
38=============================
39
40In the gpiolib framework each GPIO controller is packaged as a "struct
41gpio_chip" (see linux/gpio/driver.h for its complete definition) with members
42common to each controller of that type:
43
44 - methods to establish GPIO direction
45 - methods used to access GPIO values
46 - method to return the IRQ number associated with a given GPIO
47 - flag saying whether calls to its methods may sleep
48 - optional debugfs dump method (showing extra state like pullup config)
49 - optional base number (will be automatically assigned if omitted)
50 - label for diagnostics and GPIOs mapping using platform data
51
52The code implementing a gpio_chip should support multiple instances of the
53controller, possibly using the driver model. That code will configure each
54gpio_chip and issue gpiochip_add(). Removing a GPIO controller should be rare;
55use gpiochip_remove() when it is unavoidable.
56
57Most often a gpio_chip is part of an instance-specific structure with state not
58exposed by the GPIO interfaces, such as addressing, power management, and more.
59Chips such as codecs will have complex non-GPIO state.
60
61Any debugfs dump method should normally ignore signals which haven't been
62requested as GPIOs. They can use gpiochip_is_requested(), which returns either
63NULL or the label associated with that GPIO when it was requested.
64
65Locking IRQ usage
66-----------------
67Input GPIOs can be used as IRQ signals. When this happens, a driver is requested
68to mark the GPIO as being used as an IRQ:
69
70 int gpiod_lock_as_irq(struct gpio_desc *desc)
71
72This will prevent the use of non-irq related GPIO APIs until the GPIO IRQ lock
73is released:
74
75 void gpiod_unlock_as_irq(struct gpio_desc *desc)
diff --git a/Documentation/gpio.txt b/Documentation/gpio/gpio-legacy.txt
index 6f83fa965b4b..6f83fa965b4b 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio/gpio-legacy.txt
diff --git a/Documentation/gpio/gpio.txt b/Documentation/gpio/gpio.txt
new file mode 100644
index 000000000000..cd9b356e88cd
--- /dev/null
+++ b/Documentation/gpio/gpio.txt
@@ -0,0 +1,119 @@
1GPIO Interfaces
2===============
3
4The documents in this directory give detailed instructions on how to access
5GPIOs in drivers, and how to write a driver for a device that provides GPIOs
6itself.
7
8Due to the history of GPIO interfaces in the kernel, there are two different
9ways to obtain and use GPIOs:
10
11 - The descriptor-based interface is the preferred way to manipulate GPIOs,
12and is described by all the files in this directory except gpio-legacy.txt.
13 - The legacy integer-based interface which is considered deprecated (but still
14usable for compatibility reasons) is documented in gpio-legacy.txt.
15
16The remainder of this document applies to the new descriptor-based interface.
17gpio-legacy.txt contains the same information applied to the legacy
18integer-based interface.
19
20
21What is a GPIO?
22===============
23
24A "General Purpose Input/Output" (GPIO) is a flexible software-controlled
25digital signal. They are provided from many kinds of chip, and are familiar
26to Linux developers working with embedded and custom hardware. Each GPIO
27represents a bit connected to a particular pin, or "ball" on Ball Grid Array
28(BGA) packages. Board schematics show which external hardware connects to
29which GPIOs. Drivers can be written generically, so that board setup code
30passes such pin configuration data to drivers.
31
32System-on-Chip (SOC) processors heavily rely on GPIOs. In some cases, every
33non-dedicated pin can be configured as a GPIO; and most chips have at least
34several dozen of them. Programmable logic devices (like FPGAs) can easily
35provide GPIOs; multifunction chips like power managers, and audio codecs
36often have a few such pins to help with pin scarcity on SOCs; and there are
37also "GPIO Expander" chips that connect using the I2C or SPI serial buses.
38Most PC southbridges have a few dozen GPIO-capable pins (with only the BIOS
39firmware knowing how they're used).
40
41The exact capabilities of GPIOs vary between systems. Common options:
42
43 - Output values are writable (high=1, low=0). Some chips also have
44 options about how that value is driven, so that for example only one
45 value might be driven, supporting "wire-OR" and similar schemes for the
46 other value (notably, "open drain" signaling).
47
48 - Input values are likewise readable (1, 0). Some chips support readback
49 of pins configured as "output", which is very useful in such "wire-OR"
50 cases (to support bidirectional signaling). GPIO controllers may have
51 input de-glitch/debounce logic, sometimes with software controls.
52
53 - Inputs can often be used as IRQ signals, often edge triggered but
54 sometimes level triggered. Such IRQs may be configurable as system
55 wakeup events, to wake the system from a low power state.
56
57 - Usually a GPIO will be configurable as either input or output, as needed
58 by different product boards; single direction ones exist too.
59
60 - Most GPIOs can be accessed while holding spinlocks, but those accessed
61 through a serial bus normally can't. Some systems support both types.
62
63On a given board each GPIO is used for one specific purpose like monitoring
64MMC/SD card insertion/removal, detecting card write-protect status, driving
65a LED, configuring a transceiver, bit-banging a serial bus, poking a hardware
66watchdog, sensing a switch, and so on.
67
68
69Common GPIO Properties
70======================
71
72These properties are used throughout all the other documents of the GPIO interface
73and it is useful to understand them, especially if you need to define GPIO
74mappings.
75
76Active-High and Active-Low
77--------------------------
78It is natural to assume that a GPIO is "active" when its output signal is 1
79("high"), and inactive when it is 0 ("low"). However in practice the signal of a
80GPIO may be inverted before it reaches its destination, or a device could decide
81to have different conventions about what "active" means. Such decisions should
82be transparent to device drivers, therefore it is possible to define a GPIO as
83being either active-high ("1" means "active", the default) or active-low ("0"
84means "active") so that drivers only need to worry about the logical signal and
85not about what happens at the line level.
86
87Open Drain and Open Source
88--------------------------
89Sometimes shared signals need to use "open drain" (where only the low signal
90level is actually driven), or "open source" (where only the high signal level is
91driven) signaling. That term applies to CMOS transistors; "open collector" is
92used for TTL. A pullup or pulldown resistor causes the high or low signal level.
93This is sometimes called a "wire-AND"; or more practically, from the negative
94logic (low=true) perspective this is a "wire-OR".
95
96One common example of an open drain signal is a shared active-low IRQ line.
97Also, bidirectional data bus signals sometimes use open drain signals.
98
99Some GPIO controllers directly support open drain and open source outputs; many
100don't. When you need open drain signaling but your hardware doesn't directly
101support it, there's a common idiom you can use to emulate it with any GPIO pin
102that can be used as either an input or an output:
103
104 LOW: gpiod_direction_output(gpio, 0) ... this drives the signal and overrides
105 the pullup.
106
107 HIGH: gpiod_direction_input(gpio) ... this turns off the output, so the pullup
108 (or some other device) controls the signal.
109
110The same logic can be applied to emulate open source signaling, by driving the
111high signal and configuring the GPIO as input for low. This open drain/open
112source emulation can be handled transparently by the GPIO framework.
113
114If you are "driving" the signal high but gpiod_get_value(gpio) reports a low
115value (after the appropriate rise time passes), you know some other component is
116driving the shared signal low. That's not necessarily an error. As one common
117example, that's how I2C clocks are stretched: a slave that needs a slower clock
118delays the rising edge of SCK, and the I2C master adjusts its signaling rate
119accordingly.
diff --git a/Documentation/gpio/sysfs.txt b/Documentation/gpio/sysfs.txt
new file mode 100644
index 000000000000..c2c3a97f8ff7
--- /dev/null
+++ b/Documentation/gpio/sysfs.txt
@@ -0,0 +1,155 @@
1GPIO Sysfs Interface for Userspace
2==================================
3
4Platforms which use the "gpiolib" implementors framework may choose to
5configure a sysfs user interface to GPIOs. This is different from the
6debugfs interface, since it provides control over GPIO direction and
7value instead of just showing a gpio state summary. Plus, it could be
8present on production systems without debugging support.
9
10Given appropriate hardware documentation for the system, userspace could
11know for example that GPIO #23 controls the write protect line used to
12protect boot loader segments in flash memory. System upgrade procedures
13may need to temporarily remove that protection, first importing a GPIO,
14then changing its output state, then updating the code before re-enabling
15the write protection. In normal use, GPIO #23 would never be touched,
16and the kernel would have no need to know about it.
17
18Again depending on appropriate hardware documentation, on some systems
19userspace GPIO can be used to determine system configuration data that
20standard kernels won't know about. And for some tasks, simple userspace
21GPIO drivers could be all that the system really needs.
22
23Note that standard kernel drivers exist for common "LEDs and Buttons"
24GPIO tasks: "leds-gpio" and "gpio_keys", respectively. Use those
25instead of talking directly to the GPIOs; they integrate with kernel
26frameworks better than your userspace code could.
27
28
29Paths in Sysfs
30--------------
31There are three kinds of entry in /sys/class/gpio:
32
33 - Control interfaces used to get userspace control over GPIOs;
34
35 - GPIOs themselves; and
36
37 - GPIO controllers ("gpio_chip" instances).
38
39That's in addition to standard files including the "device" symlink.
40
41The control interfaces are write-only:
42
43 /sys/class/gpio/
44
45 "export" ... Userspace may ask the kernel to export control of
46 a GPIO to userspace by writing its number to this file.
47
48 Example: "echo 19 > export" will create a "gpio19" node
49 for GPIO #19, if that's not requested by kernel code.
50
51 "unexport" ... Reverses the effect of exporting to userspace.
52
53 Example: "echo 19 > unexport" will remove a "gpio19"
54 node exported using the "export" file.
55
56GPIO signals have paths like /sys/class/gpio/gpio42/ (for GPIO #42)
57and have the following read/write attributes:
58
59 /sys/class/gpio/gpioN/
60
61 "direction" ... reads as either "in" or "out". This value may
62 normally be written. Writing as "out" defaults to
63 initializing the value as low. To ensure glitch free
64 operation, values "low" and "high" may be written to
65 configure the GPIO as an output with that initial value.
66
67 Note that this attribute *will not exist* if the kernel
68 doesn't support changing the direction of a GPIO, or
69 it was exported by kernel code that didn't explicitly
70 allow userspace to reconfigure this GPIO's direction.
71
72 "value" ... reads as either 0 (low) or 1 (high). If the GPIO
73 is configured as an output, this value may be written;
74 any nonzero value is treated as high.
75
76 If the pin can be configured as an interrupt-generating input
77 and if it has been configured to generate interrupts (see the
78 description of "edge"), you can poll(2) on that file and
79 poll(2) will return whenever the interrupt was triggered. If
80 you use poll(2), set the events POLLPRI and POLLERR. If you
81 use select(2), set the file descriptor in exceptfds. After
82 poll(2) returns, either lseek(2) to the beginning of the sysfs
83 file and read the new value or close the file and re-open it
84 to read the value.
85
86 "edge" ... reads as either "none", "rising", "falling", or
87 "both". Write these strings to select the signal edge(s)
88 that will make poll(2) on the "value" file return.
89
90 This file exists only if the pin can be configured as an
91 interrupt generating input pin.
92
93 "active_low" ... reads as either 0 (false) or 1 (true). Write
94 any nonzero value to invert the value attribute both
95 for reading and writing. Existing and subsequent
96 poll(2) support configuration via the edge attribute
97 for "rising" and "falling" edges will follow this
98 setting.
99
100GPIO controllers have paths like /sys/class/gpio/gpiochip42/ (for the
101controller implementing GPIOs starting at #42) and have the following
102read-only attributes:
103
104 /sys/class/gpio/gpiochipN/
105
106 "base" ... same as N, the first GPIO managed by this chip
107
108 "label" ... provided for diagnostics (not always unique)
109
110 "ngpio" ... how many GPIOs this manages (N to N + ngpio - 1)
111
112Board documentation should in most cases cover what GPIOs are used for
113what purposes. However, those numbers are not always stable; GPIOs on
114a daughtercard might be different depending on the base board being used,
115or other cards in the stack. In such cases, you may need to use the
116gpiochip nodes (possibly in conjunction with schematics) to determine
117the correct GPIO number to use for a given signal.
118
119
120Exporting from Kernel code
121--------------------------
122Kernel code can explicitly manage exports of GPIOs which have already been
123requested using gpio_request():
124
125 /* export the GPIO to userspace */
126 int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
127
128 /* reverse gpio_export() */
129 void gpiod_unexport(struct gpio_desc *desc);
130
131 /* create a sysfs link to an exported GPIO node */
132 int gpiod_export_link(struct device *dev, const char *name,
133 struct gpio_desc *desc);
134
135 /* change the polarity of a GPIO node in sysfs */
136 int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value);
137
138After a kernel driver requests a GPIO, it may only be made available in
139the sysfs interface by gpiod_export(). The driver can control whether the
140signal direction may change. This helps drivers prevent userspace code
141from accidentally clobbering important system state.
142
143This explicit exporting can help with debugging (by making some kinds
144of experiments easier), or can provide an always-there interface that's
145suitable for documenting as part of a board support package.
146
147After the GPIO has been exported, gpiod_export_link() allows creating
148symlinks from elsewhere in sysfs to the GPIO sysfs node. Drivers can
149use this to provide the interface under their own device in sysfs with
150a descriptive name.
151
152Drivers can use gpiod_sysfs_set_active_low() to hide GPIO line polarity
153differences between boards from user space. Polarity change can be done both
154before and after gpiod_export(), and previously enabled poll(2) support for
155either rising or falling edge will be reconfigured to follow this setting.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 9ca3e74a10e1..50680a59a2ff 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1190,15 +1190,24 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1190 owned by uid=0. 1190 owned by uid=0.
1191 1191
1192 ima_hash= [IMA] 1192 ima_hash= [IMA]
1193 Format: { "sha1" | "md5" } 1193 Format: { md5 | sha1 | rmd160 | sha256 | sha384
1194 | sha512 | ... }
1194 default: "sha1" 1195 default: "sha1"
1195 1196
1197 The list of supported hash algorithms is defined
1198 in crypto/hash_info.h.
1199
1196 ima_tcb [IMA] 1200 ima_tcb [IMA]
1197 Load a policy which meets the needs of the Trusted 1201 Load a policy which meets the needs of the Trusted
1198 Computing Base. This means IMA will measure all 1202 Computing Base. This means IMA will measure all
1199 programs exec'd, files mmap'd for exec, and all files 1203 programs exec'd, files mmap'd for exec, and all files
1200 opened for read by uid=0. 1204 opened for read by uid=0.
1201 1205
1206 ima_template= [IMA]
1207 Select one of defined IMA measurements template formats.
1208 Formats: { "ima" | "ima-ng" }
1209 Default: "ima-ng"
1210
1202 init= [KNL] 1211 init= [KNL]
1203 Format: <full_path> 1212 Format: <full_path>
1204 Run specified binary instead of /sbin/init as init 1213 Run specified binary instead of /sbin/init as init
diff --git a/Documentation/mic/mpssd/mpssd.c b/Documentation/mic/mpssd/mpssd.c
index 0c980ad40b17..4d17487d5ad9 100644
--- a/Documentation/mic/mpssd/mpssd.c
+++ b/Documentation/mic/mpssd/mpssd.c
@@ -313,7 +313,7 @@ static struct mic_device_desc *get_device_desc(struct mic_info *mic, int type)
313 int i; 313 int i;
314 void *dp = get_dp(mic, type); 314 void *dp = get_dp(mic, type);
315 315
316 for (i = mic_aligned_size(struct mic_bootparam); i < PAGE_SIZE; 316 for (i = sizeof(struct mic_bootparam); i < PAGE_SIZE;
317 i += mic_total_desc_size(d)) { 317 i += mic_total_desc_size(d)) {
318 d = dp + i; 318 d = dp + i;
319 319
@@ -445,8 +445,8 @@ init_vr(struct mic_info *mic, int fd, int type,
445 __func__, mic->name, vr0->va, vr0->info, vr_size, 445 __func__, mic->name, vr0->va, vr0->info, vr_size,
446 vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN)); 446 vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN));
447 mpsslog("magic 0x%x expected 0x%x\n", 447 mpsslog("magic 0x%x expected 0x%x\n",
448 vr0->info->magic, MIC_MAGIC + type); 448 le32toh(vr0->info->magic), MIC_MAGIC + type);
449 assert(vr0->info->magic == MIC_MAGIC + type); 449 assert(le32toh(vr0->info->magic) == MIC_MAGIC + type);
450 if (vr1) { 450 if (vr1) {
451 vr1->va = (struct mic_vring *) 451 vr1->va = (struct mic_vring *)
452 &va[MIC_DEVICE_PAGE_END + vr_size]; 452 &va[MIC_DEVICE_PAGE_END + vr_size];
@@ -458,8 +458,8 @@ init_vr(struct mic_info *mic, int fd, int type,
458 __func__, mic->name, vr1->va, vr1->info, vr_size, 458 __func__, mic->name, vr1->va, vr1->info, vr_size,
459 vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN)); 459 vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN));
460 mpsslog("magic 0x%x expected 0x%x\n", 460 mpsslog("magic 0x%x expected 0x%x\n",
461 vr1->info->magic, MIC_MAGIC + type + 1); 461 le32toh(vr1->info->magic), MIC_MAGIC + type + 1);
462 assert(vr1->info->magic == MIC_MAGIC + type + 1); 462 assert(le32toh(vr1->info->magic) == MIC_MAGIC + type + 1);
463 } 463 }
464done: 464done:
465 return va; 465 return va;
@@ -520,7 +520,7 @@ static void *
520virtio_net(void *arg) 520virtio_net(void *arg)
521{ 521{
522 static __u8 vnet_hdr[2][sizeof(struct virtio_net_hdr)]; 522 static __u8 vnet_hdr[2][sizeof(struct virtio_net_hdr)];
523 static __u8 vnet_buf[2][MAX_NET_PKT_SIZE] __aligned(64); 523 static __u8 vnet_buf[2][MAX_NET_PKT_SIZE] __attribute__ ((aligned(64)));
524 struct iovec vnet_iov[2][2] = { 524 struct iovec vnet_iov[2][2] = {
525 { { .iov_base = vnet_hdr[0], .iov_len = sizeof(vnet_hdr[0]) }, 525 { { .iov_base = vnet_hdr[0], .iov_len = sizeof(vnet_hdr[0]) },
526 { .iov_base = vnet_buf[0], .iov_len = sizeof(vnet_buf[0]) } }, 526 { .iov_base = vnet_buf[0], .iov_len = sizeof(vnet_buf[0]) } },
@@ -1412,6 +1412,12 @@ mic_config(void *arg)
1412 } 1412 }
1413 1413
1414 do { 1414 do {
1415 ret = lseek(fd, 0, SEEK_SET);
1416 if (ret < 0) {
1417 mpsslog("%s: Failed to seek to file start '%s': %s\n",
1418 mic->name, pathname, strerror(errno));
1419 goto close_error1;
1420 }
1415 ret = read(fd, value, sizeof(value)); 1421 ret = read(fd, value, sizeof(value));
1416 if (ret < 0) { 1422 if (ret < 0) {
1417 mpsslog("%s: Failed to read sysfs entry '%s': %s\n", 1423 mpsslog("%s: Failed to read sysfs entry '%s': %s\n",
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 8b8a05787641..3c12d9a7ed00 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -577,9 +577,6 @@ tcp_limit_output_bytes - INTEGER
577 typical pfifo_fast qdiscs. 577 typical pfifo_fast qdiscs.
578 tcp_limit_output_bytes limits the number of bytes on qdisc 578 tcp_limit_output_bytes limits the number of bytes on qdisc
579 or device to reduce artificial RTT/cwnd and reduce bufferbloat. 579 or device to reduce artificial RTT/cwnd and reduce bufferbloat.
580 Note: For GSO/TSO enabled flows, we try to have at least two
581 packets in flight. Reducing tcp_limit_output_bytes might also
582 reduce the size of individual GSO packet (64KB being the max)
583 Default: 131072 580 Default: 131072
584 581
585tcp_challenge_ack_limit - INTEGER 582tcp_challenge_ack_limit - INTEGER
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index c01223628a87..8e48e3b14227 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -123,6 +123,16 @@ Transmission process is similar to capture as shown below.
123[shutdown] close() --------> destruction of the transmission socket and 123[shutdown] close() --------> destruction of the transmission socket and
124 deallocation of all associated resources. 124 deallocation of all associated resources.
125 125
126Socket creation and destruction is also straight forward, and is done
127the same way as in capturing described in the previous paragraph:
128
129 int fd = socket(PF_PACKET, mode, 0);
130
131The protocol can optionally be 0 in case we only want to transmit
132via this socket, which avoids an expensive call to packet_rcv().
133In this case, you also need to bind(2) the TX_RING with sll_protocol = 0
134set. Otherwise, htons(ETH_P_ALL) or any other protocol, for example.
135
126Binding the socket to your network interface is mandatory (with zero copy) to 136Binding the socket to your network interface is mandatory (with zero copy) to
127know the header size of frames used in the circular buffer. 137know the header size of frames used in the circular buffer.
128 138
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 0f54333b0ff2..b6ce00b2be9a 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -547,13 +547,11 @@ helper functions described in Section 4. In that case, pm_runtime_resume()
547should be used. Of course, for this purpose the device's runtime PM has to be 547should be used. Of course, for this purpose the device's runtime PM has to be
548enabled earlier by calling pm_runtime_enable(). 548enabled earlier by calling pm_runtime_enable().
549 549
550If the device bus type's or driver's ->probe() callback runs 550It may be desirable to suspend the device once ->probe() has finished.
551pm_runtime_suspend() or pm_runtime_idle() or their asynchronous counterparts, 551Therefore the driver core uses the asynchronous pm_request_idle() to submit a
552they will fail returning -EAGAIN, because the device's usage counter is 552request to execute the subsystem-level idle callback for the device at that
553incremented by the driver core before executing ->probe(). Still, it may be 553time. A driver that makes use of the runtime autosuspend feature, may want to
554desirable to suspend the device as soon as ->probe() has finished, so the driver 554update the last busy mark before returning from ->probe().
555core uses pm_runtime_put_sync() to invoke the subsystem-level idle callback for
556the device at that time.
557 555
558Moreover, the driver core prevents runtime PM callbacks from racing with the bus 556Moreover, the driver core prevents runtime PM callbacks from racing with the bus
559notifier callback in __device_release_driver(), which is necessary, because the 557notifier callback in __device_release_driver(), which is necessary, because the
@@ -656,7 +654,7 @@ out the following operations:
656 __pm_runtime_disable() with 'false' as the second argument for every device 654 __pm_runtime_disable() with 'false' as the second argument for every device
657 right before executing the subsystem-level .suspend_late() callback for it. 655 right before executing the subsystem-level .suspend_late() callback for it.
658 656
659 * During system resume it calls pm_runtime_enable() and pm_runtime_put_sync() 657 * During system resume it calls pm_runtime_enable() and pm_runtime_put()
660 for every device right after executing the subsystem-level .resume_early() 658 for every device right after executing the subsystem-level .resume_early()
661 callback and right after executing the subsystem-level .resume() callback 659 callback and right after executing the subsystem-level .resume() callback
662 for it, respectively. 660 for it, respectively.
diff --git a/Documentation/security/00-INDEX b/Documentation/security/00-INDEX
index 414235c1fcfc..45c82fd3e9d3 100644
--- a/Documentation/security/00-INDEX
+++ b/Documentation/security/00-INDEX
@@ -22,3 +22,5 @@ keys.txt
22 - description of the kernel key retention service. 22 - description of the kernel key retention service.
23tomoyo.txt 23tomoyo.txt
24 - documentation on the TOMOYO Linux Security Module. 24 - documentation on the TOMOYO Linux Security Module.
25IMA-templates.txt
26 - documentation on the template management mechanism for IMA.
diff --git a/Documentation/security/IMA-templates.txt b/Documentation/security/IMA-templates.txt
new file mode 100644
index 000000000000..a777e5f1df5b
--- /dev/null
+++ b/Documentation/security/IMA-templates.txt
@@ -0,0 +1,87 @@
1 IMA Template Management Mechanism
2
3
4==== INTRODUCTION ====
5
6The original 'ima' template is fixed length, containing the filedata hash
7and pathname. The filedata hash is limited to 20 bytes (md5/sha1).
8The pathname is a null terminated string, limited to 255 characters.
9To overcome these limitations and to add additional file metadata, it is
10necessary to extend the current version of IMA by defining additional
11templates. For example, information that could be possibly reported are
12the inode UID/GID or the LSM labels either of the inode and of the process
13that is accessing it.
14
15However, the main problem to introduce this feature is that, each time
16a new template is defined, the functions that generate and display
17the measurements list would include the code for handling a new format
18and, thus, would significantly grow over the time.
19
20The proposed solution solves this problem by separating the template
21management from the remaining IMA code. The core of this solution is the
22definition of two new data structures: a template descriptor, to determine
23which information should be included in the measurement list; a template
24field, to generate and display data of a given type.
25
26Managing templates with these structures is very simple. To support
27a new data type, developers define the field identifier and implement
28two functions, init() and show(), respectively to generate and display
29measurement entries. Defining a new template descriptor requires
30specifying the template format, a string of field identifiers separated
31by the '|' character. While in the current implementation it is possible
32to define new template descriptors only by adding their definition in the
33template specific code (ima_template.c), in a future version it will be
34possible to register a new template on a running kernel by supplying to IMA
35the desired format string. In this version, IMA initializes at boot time
36all defined template descriptors by translating the format into an array
37of template fields structures taken from the set of the supported ones.
38
39After the initialization step, IMA will call ima_alloc_init_template()
40(new function defined within the patches for the new template management
41mechanism) to generate a new measurement entry by using the template
42descriptor chosen through the kernel configuration or through the newly
43introduced 'ima_template=' kernel command line parameter. It is during this
44phase that the advantages of the new architecture are clearly shown:
45the latter function will not contain specific code to handle a given template
46but, instead, it simply calls the init() method of the template fields
47associated to the chosen template descriptor and store the result (pointer
48to allocated data and data length) in the measurement entry structure.
49
50The same mechanism is employed to display measurements entries.
51The functions ima[_ascii]_measurements_show() retrieve, for each entry,
52the template descriptor used to produce that entry and call the show()
53method for each item of the array of template fields structures.
54
55
56
57==== SUPPORTED TEMPLATE FIELDS AND DESCRIPTORS ====
58
59In the following, there is the list of supported template fields
60('<identifier>': description), that can be used to define new template
61descriptors by adding their identifier to the format string
62(support for more data types will be added later):
63
64 - 'd': the digest of the event (i.e. the digest of a measured file),
65 calculated with the SHA1 or MD5 hash algorithm;
66 - 'n': the name of the event (i.e. the file name), with size up to 255 bytes;
67 - 'd-ng': the digest of the event, calculated with an arbitrary hash
68 algorithm (field format: [<hash algo>:]digest, where the digest
69 prefix is shown only if the hash algorithm is not SHA1 or MD5);
70 - 'n-ng': the name of the event, without size limitations.
71
72
73Below, there is the list of defined template descriptors:
74 - "ima": its format is 'd|n';
75 - "ima-ng" (default): its format is 'd-ng|n-ng'.
76
77
78
79==== USE ====
80
81To specify the template descriptor to be used to generate measurement entries,
82currently the following methods are supported:
83
84 - select a template descriptor among those supported in the kernel
85 configuration ('ima-ng' is the default choice);
86 - specify a template descriptor name from the kernel command line through
87 the 'ima_template=' parameter.
diff --git a/Documentation/security/keys.txt b/Documentation/security/keys.txt
index 7b4145d00452..a4c33f1a7c6d 100644
--- a/Documentation/security/keys.txt
+++ b/Documentation/security/keys.txt
@@ -865,15 +865,14 @@ encountered:
865 calling processes has a searchable link to the key from one of its 865 calling processes has a searchable link to the key from one of its
866 keyrings. There are three functions for dealing with these: 866 keyrings. There are three functions for dealing with these:
867 867
868 key_ref_t make_key_ref(const struct key *key, 868 key_ref_t make_key_ref(const struct key *key, bool possession);
869 unsigned long possession);
870 869
871 struct key *key_ref_to_ptr(const key_ref_t key_ref); 870 struct key *key_ref_to_ptr(const key_ref_t key_ref);
872 871
873 unsigned long is_key_possessed(const key_ref_t key_ref); 872 bool is_key_possessed(const key_ref_t key_ref);
874 873
875 The first function constructs a key reference from a key pointer and 874 The first function constructs a key reference from a key pointer and
876 possession information (which must be 0 or 1 and not any other value). 875 possession information (which must be true or false).
877 876
878 The second function retrieves the key pointer from a reference and the 877 The second function retrieves the key pointer from a reference and the
879 third retrieves the possession flag. 878 third retrieves the possession flag.
@@ -961,14 +960,17 @@ payload contents" for more information.
961 the argument will not be parsed. 960 the argument will not be parsed.
962 961
963 962
964(*) Extra references can be made to a key by calling the following function: 963(*) Extra references can be made to a key by calling one of the following
964 functions:
965 965
966 struct key *__key_get(struct key *key);
966 struct key *key_get(struct key *key); 967 struct key *key_get(struct key *key);
967 968
968 These need to be disposed of by calling key_put() when they've been 969 Keys so references will need to be disposed of by calling key_put() when
969 finished with. The key pointer passed in will be returned. If the pointer 970 they've been finished with. The key pointer passed in will be returned.
970 is NULL or CONFIG_KEYS is not set then the key will not be dereferenced and 971
971 no increment will take place. 972 In the case of key_get(), if the pointer is NULL or CONFIG_KEYS is not set
973 then the key will not be dereferenced and no increment will take place.
972 974
973 975
974(*) A key's serial number can be obtained by calling: 976(*) A key's serial number can be obtained by calling:
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 54d29c1320ed..230ce71f4d75 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -440,15 +440,15 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
440 buf += " /*\n" 440 buf += " /*\n"
441 buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n" 441 buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
442 buf += " */\n" 442 buf += " */\n"
443 buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n" 443 buf += " fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
444 buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n" 444 buf += " fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;\n"
445 buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n" 445 buf += " fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
446 buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n" 446 buf += " fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;\n"
447 buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n" 447 buf += " fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
448 buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n" 448 buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
449 buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n" 449 buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
450 buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n" 450 buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
451 buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n" 451 buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
452 buf += " /*\n" 452 buf += " /*\n"
453 buf += " * Register the fabric for use within TCM\n" 453 buf += " * Register the fabric for use within TCM\n"
454 buf += " */\n" 454 buf += " */\n"
diff --git a/Documentation/vm/split_page_table_lock b/Documentation/vm/split_page_table_lock
index 7521d367f21d..6dea4fd5c961 100644
--- a/Documentation/vm/split_page_table_lock
+++ b/Documentation/vm/split_page_table_lock
@@ -63,9 +63,9 @@ levels.
63PMD split lock enabling requires pgtable_pmd_page_ctor() call on PMD table 63PMD split lock enabling requires pgtable_pmd_page_ctor() call on PMD table
64allocation and pgtable_pmd_page_dtor() on freeing. 64allocation and pgtable_pmd_page_dtor() on freeing.
65 65
66Allocation usually happens in pmd_alloc_one(), freeing in pmd_free(), but 66Allocation usually happens in pmd_alloc_one(), freeing in pmd_free() and
67make sure you cover all PMD table allocation / freeing paths: i.e X86_PAE 67pmd_free_tlb(), but make sure you cover all PMD table allocation / freeing
68preallocate few PMDs on pgd_alloc(). 68paths: i.e X86_PAE preallocate few PMDs on pgd_alloc().
69 69
70With everything in place you can set CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK. 70With everything in place you can set CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK.
71 71
diff --git a/MAINTAINERS b/MAINTAINERS
index 0e598aeed539..1344816c4c06 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -893,20 +893,15 @@ F: arch/arm/include/asm/hardware/dec21285.h
893F: arch/arm/mach-footbridge/ 893F: arch/arm/mach-footbridge/
894 894
895ARM/FREESCALE IMX / MXC ARM ARCHITECTURE 895ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
896M: Shawn Guo <shawn.guo@linaro.org>
896M: Sascha Hauer <kernel@pengutronix.de> 897M: Sascha Hauer <kernel@pengutronix.de>
897L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 898L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
898S: Maintained 899S: Maintained
899T: git git://git.pengutronix.de/git/imx/linux-2.6.git 900T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
900F: arch/arm/mach-imx/ 901F: arch/arm/mach-imx/
902F: arch/arm/boot/dts/imx*
901F: arch/arm/configs/imx*_defconfig 903F: arch/arm/configs/imx*_defconfig
902 904
903ARM/FREESCALE IMX6
904M: Shawn Guo <shawn.guo@linaro.org>
905L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
906S: Maintained
907T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
908F: arch/arm/mach-imx/*imx6*
909
910ARM/FREESCALE MXS ARM ARCHITECTURE 905ARM/FREESCALE MXS ARM ARCHITECTURE
911M: Shawn Guo <shawn.guo@linaro.org> 906M: Shawn Guo <shawn.guo@linaro.org>
912L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 907L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1934,7 +1929,8 @@ S: Maintained
1934F: drivers/gpio/gpio-bt8xx.c 1929F: drivers/gpio/gpio-bt8xx.c
1935 1930
1936BTRFS FILE SYSTEM 1931BTRFS FILE SYSTEM
1937M: Chris Mason <chris.mason@fusionio.com> 1932M: Chris Mason <clm@fb.com>
1933M: Josef Bacik <jbacik@fb.com>
1938L: linux-btrfs@vger.kernel.org 1934L: linux-btrfs@vger.kernel.org
1939W: http://btrfs.wiki.kernel.org/ 1935W: http://btrfs.wiki.kernel.org/
1940Q: http://patchwork.kernel.org/project/linux-btrfs/list/ 1936Q: http://patchwork.kernel.org/project/linux-btrfs/list/
@@ -2137,11 +2133,17 @@ S: Maintained
2137F: Documentation/zh_CN/ 2133F: Documentation/zh_CN/
2138 2134
2139CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER 2135CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
2140M: Alexander Shishkin <alexander.shishkin@linux.intel.com> 2136M: Peter Chen <Peter.Chen@freescale.com>
2137T: git://github.com/hzpeterchen/linux-usb.git
2141L: linux-usb@vger.kernel.org 2138L: linux-usb@vger.kernel.org
2142S: Maintained 2139S: Maintained
2143F: drivers/usb/chipidea/ 2140F: drivers/usb/chipidea/
2144 2141
2142CHROME HARDWARE PLATFORM SUPPORT
2143M: Olof Johansson <olof@lixom.net>
2144S: Maintained
2145F: drivers/platform/chrome/
2146
2145CISCO VIC ETHERNET NIC DRIVER 2147CISCO VIC ETHERNET NIC DRIVER
2146M: Christian Benvenuti <benve@cisco.com> 2148M: Christian Benvenuti <benve@cisco.com>
2147M: Sujith Sankar <ssujith@cisco.com> 2149M: Sujith Sankar <ssujith@cisco.com>
@@ -2468,7 +2470,7 @@ S: Maintained
2468F: drivers/media/dvb-frontends/cxd2820r* 2470F: drivers/media/dvb-frontends/cxd2820r*
2469 2471
2470CXGB3 ETHERNET DRIVER (CXGB3) 2472CXGB3 ETHERNET DRIVER (CXGB3)
2471M: Divy Le Ray <divy@chelsio.com> 2473M: Santosh Raspatur <santosh@chelsio.com>
2472L: netdev@vger.kernel.org 2474L: netdev@vger.kernel.org
2473W: http://www.chelsio.com 2475W: http://www.chelsio.com
2474S: Supported 2476S: Supported
@@ -4038,12 +4040,26 @@ W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi
4038S: Maintained 4040S: Maintained
4039F: fs/hpfs/ 4041F: fs/hpfs/
4040 4042
4043HSI SUBSYSTEM
4044M: Sebastian Reichel <sre@debian.org>
4045S: Maintained
4046F: Documentation/ABI/testing/sysfs-bus-hsi
4047F: drivers/hsi/
4048F: include/linux/hsi/
4049F: include/uapi/linux/hsi/
4050
4041HSO 3G MODEM DRIVER 4051HSO 3G MODEM DRIVER
4042M: Jan Dumon <j.dumon@option.com> 4052M: Jan Dumon <j.dumon@option.com>
4043W: http://www.pharscape.org 4053W: http://www.pharscape.org
4044S: Maintained 4054S: Maintained
4045F: drivers/net/usb/hso.c 4055F: drivers/net/usb/hso.c
4046 4056
4057HSR NETWORK PROTOCOL
4058M: Arvid Brodin <arvid.brodin@alten.se>
4059L: netdev@vger.kernel.org
4060S: Maintained
4061F: net/hsr/
4062
4047HTCPEN TOUCHSCREEN DRIVER 4063HTCPEN TOUCHSCREEN DRIVER
4048M: Pau Oliva Fora <pof@eslack.org> 4064M: Pau Oliva Fora <pof@eslack.org>
4049L: linux-input@vger.kernel.org 4065L: linux-input@vger.kernel.org
@@ -4065,6 +4081,7 @@ F: arch/x86/include/uapi/asm/hyperv.h
4065F: arch/x86/kernel/cpu/mshyperv.c 4081F: arch/x86/kernel/cpu/mshyperv.c
4066F: drivers/hid/hid-hyperv.c 4082F: drivers/hid/hid-hyperv.c
4067F: drivers/hv/ 4083F: drivers/hv/
4084F: drivers/input/serio/hyperv-keyboard.c
4068F: drivers/net/hyperv/ 4085F: drivers/net/hyperv/
4069F: drivers/scsi/storvsc_drv.c 4086F: drivers/scsi/storvsc_drv.c
4070F: drivers/video/hyperv_fb.c 4087F: drivers/video/hyperv_fb.c
@@ -4449,10 +4466,8 @@ M: Bruce Allan <bruce.w.allan@intel.com>
4449M: Carolyn Wyborny <carolyn.wyborny@intel.com> 4466M: Carolyn Wyborny <carolyn.wyborny@intel.com>
4450M: Don Skidmore <donald.c.skidmore@intel.com> 4467M: Don Skidmore <donald.c.skidmore@intel.com>
4451M: Greg Rose <gregory.v.rose@intel.com> 4468M: Greg Rose <gregory.v.rose@intel.com>
4452M: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
4453M: Alex Duyck <alexander.h.duyck@intel.com> 4469M: Alex Duyck <alexander.h.duyck@intel.com>
4454M: John Ronciak <john.ronciak@intel.com> 4470M: John Ronciak <john.ronciak@intel.com>
4455M: Tushar Dave <tushar.n.dave@intel.com>
4456L: e1000-devel@lists.sourceforge.net 4471L: e1000-devel@lists.sourceforge.net
4457W: http://www.intel.com/support/feedback.htm 4472W: http://www.intel.com/support/feedback.htm
4458W: http://e1000.sourceforge.net/ 4473W: http://e1000.sourceforge.net/
@@ -5255,7 +5270,7 @@ S: Maintained
5255F: Documentation/lockdep*.txt 5270F: Documentation/lockdep*.txt
5256F: Documentation/lockstat.txt 5271F: Documentation/lockstat.txt
5257F: include/linux/lockdep.h 5272F: include/linux/lockdep.h
5258F: kernel/lockdep* 5273F: kernel/locking/
5259 5274
5260LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks) 5275LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks)
5261M: "Richard Russon (FlatCap)" <ldm@flatcap.org> 5276M: "Richard Russon (FlatCap)" <ldm@flatcap.org>
@@ -5967,10 +5982,10 @@ F: drivers/nfc/
5967F: include/linux/platform_data/pn544.h 5982F: include/linux/platform_data/pn544.h
5968 5983
5969NFS, SUNRPC, AND LOCKD CLIENTS 5984NFS, SUNRPC, AND LOCKD CLIENTS
5970M: Trond Myklebust <Trond.Myklebust@netapp.com> 5985M: Trond Myklebust <trond.myklebust@primarydata.com>
5971L: linux-nfs@vger.kernel.org 5986L: linux-nfs@vger.kernel.org
5972W: http://client.linux-nfs.org 5987W: http://client.linux-nfs.org
5973T: git git://git.linux-nfs.org/pub/linux/nfs-2.6.git 5988T: git git://git.linux-nfs.org/projects/trondmy/linux-nfs.git
5974S: Maintained 5989S: Maintained
5975F: fs/lockd/ 5990F: fs/lockd/
5976F: fs/nfs/ 5991F: fs/nfs/
@@ -6237,8 +6252,8 @@ OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
6237M: Rob Herring <rob.herring@calxeda.com> 6252M: Rob Herring <rob.herring@calxeda.com>
6238M: Pawel Moll <pawel.moll@arm.com> 6253M: Pawel Moll <pawel.moll@arm.com>
6239M: Mark Rutland <mark.rutland@arm.com> 6254M: Mark Rutland <mark.rutland@arm.com>
6240M: Stephen Warren <swarren@wwwdotorg.org>
6241M: Ian Campbell <ijc+devicetree@hellion.org.uk> 6255M: Ian Campbell <ijc+devicetree@hellion.org.uk>
6256M: Kumar Gala <galak@codeaurora.org>
6242L: devicetree@vger.kernel.org 6257L: devicetree@vger.kernel.org
6243S: Maintained 6258S: Maintained
6244F: Documentation/devicetree/ 6259F: Documentation/devicetree/
@@ -6448,19 +6463,52 @@ F: drivers/pci/
6448F: include/linux/pci* 6463F: include/linux/pci*
6449F: arch/x86/pci/ 6464F: arch/x86/pci/
6450 6465
6466PCI DRIVER FOR IMX6
6467M: Richard Zhu <r65037@freescale.com>
6468M: Shawn Guo <shawn.guo@linaro.org>
6469L: linux-pci@vger.kernel.org
6470L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6471S: Maintained
6472F: drivers/pci/host/*imx6*
6473
6474PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
6475M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
6476M: Jason Cooper <jason@lakedaemon.net>
6477L: linux-pci@vger.kernel.org
6478L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6479S: Maintained
6480F: drivers/pci/host/*mvebu*
6481
6451PCI DRIVER FOR NVIDIA TEGRA 6482PCI DRIVER FOR NVIDIA TEGRA
6452M: Thierry Reding <thierry.reding@gmail.com> 6483M: Thierry Reding <thierry.reding@gmail.com>
6453L: linux-tegra@vger.kernel.org 6484L: linux-tegra@vger.kernel.org
6485L: linux-pci@vger.kernel.org
6454S: Supported 6486S: Supported
6455F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt 6487F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
6456F: drivers/pci/host/pci-tegra.c 6488F: drivers/pci/host/pci-tegra.c
6457 6489
6490PCI DRIVER FOR RENESAS R-CAR
6491M: Simon Horman <horms@verge.net.au>
6492L: linux-pci@vger.kernel.org
6493L: linux-sh@vger.kernel.org
6494S: Maintained
6495F: drivers/pci/host/*rcar*
6496
6458PCI DRIVER FOR SAMSUNG EXYNOS 6497PCI DRIVER FOR SAMSUNG EXYNOS
6459M: Jingoo Han <jg1.han@samsung.com> 6498M: Jingoo Han <jg1.han@samsung.com>
6460L: linux-pci@vger.kernel.org 6499L: linux-pci@vger.kernel.org
6500L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6501L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
6461S: Maintained 6502S: Maintained
6462F: drivers/pci/host/pci-exynos.c 6503F: drivers/pci/host/pci-exynos.c
6463 6504
6505PCI DRIVER FOR SYNOPSIS DESIGNWARE
6506M: Mohit Kumar <mohit.kumar@st.com>
6507M: Jingoo Han <jg1.han@samsung.com>
6508L: linux-pci@vger.kernel.org
6509S: Maintained
6510F: drivers/pci/host/*designware*
6511
6464PCMCIA SUBSYSTEM 6512PCMCIA SUBSYSTEM
6465P: Linux PCMCIA Team 6513P: Linux PCMCIA Team
6466L: linux-pcmcia@lists.infradead.org 6514L: linux-pcmcia@lists.infradead.org
@@ -7379,7 +7427,6 @@ S: Maintained
7379F: kernel/sched/ 7427F: kernel/sched/
7380F: include/linux/sched.h 7428F: include/linux/sched.h
7381F: include/uapi/linux/sched.h 7429F: include/uapi/linux/sched.h
7382F: kernel/wait.c
7383F: include/linux/wait.h 7430F: include/linux/wait.h
7384 7431
7385SCORE ARCHITECTURE 7432SCORE ARCHITECTURE
@@ -7515,9 +7562,10 @@ SELINUX SECURITY MODULE
7515M: Stephen Smalley <sds@tycho.nsa.gov> 7562M: Stephen Smalley <sds@tycho.nsa.gov>
7516M: James Morris <james.l.morris@oracle.com> 7563M: James Morris <james.l.morris@oracle.com>
7517M: Eric Paris <eparis@parisplace.org> 7564M: Eric Paris <eparis@parisplace.org>
7565M: Paul Moore <paul@paul-moore.com>
7518L: selinux@tycho.nsa.gov (subscribers-only, general discussion) 7566L: selinux@tycho.nsa.gov (subscribers-only, general discussion)
7519W: http://selinuxproject.org 7567W: http://selinuxproject.org
7520T: git git://git.infradead.org/users/eparis/selinux.git 7568T: git git://git.infradead.org/users/pcmoore/selinux
7521S: Supported 7569S: Supported
7522F: include/linux/selinux* 7570F: include/linux/selinux*
7523F: security/selinux/ 7571F: security/selinux/
@@ -8664,6 +8712,7 @@ F: drivers/media/usb/tm6000/
8664TPM DEVICE DRIVER 8712TPM DEVICE DRIVER
8665M: Leonidas Da Silva Barbosa <leosilva@linux.vnet.ibm.com> 8713M: Leonidas Da Silva Barbosa <leosilva@linux.vnet.ibm.com>
8666M: Ashley Lai <ashley@ashleylai.com> 8714M: Ashley Lai <ashley@ashleylai.com>
8715M: Peter Huewe <peterhuewe@gmx.de>
8667M: Rajiv Andrade <mail@srajiv.net> 8716M: Rajiv Andrade <mail@srajiv.net>
8668W: http://tpmdd.sourceforge.net 8717W: http://tpmdd.sourceforge.net
8669M: Marcel Selhorst <tpmdd@selhorst.net> 8718M: Marcel Selhorst <tpmdd@selhorst.net>
@@ -8960,8 +9009,8 @@ USB PEGASUS DRIVER
8960M: Petko Manolov <petkan@nucleusys.com> 9009M: Petko Manolov <petkan@nucleusys.com>
8961L: linux-usb@vger.kernel.org 9010L: linux-usb@vger.kernel.org
8962L: netdev@vger.kernel.org 9011L: netdev@vger.kernel.org
8963T: git git://git.code.sf.net/p/pegasus2/git 9012T: git git://github.com/petkan/pegasus.git
8964W: http://pegasus2.sourceforge.net/ 9013W: https://github.com/petkan/pegasus
8965S: Maintained 9014S: Maintained
8966F: drivers/net/usb/pegasus.* 9015F: drivers/net/usb/pegasus.*
8967 9016
@@ -8982,8 +9031,8 @@ USB RTL8150 DRIVER
8982M: Petko Manolov <petkan@nucleusys.com> 9031M: Petko Manolov <petkan@nucleusys.com>
8983L: linux-usb@vger.kernel.org 9032L: linux-usb@vger.kernel.org
8984L: netdev@vger.kernel.org 9033L: netdev@vger.kernel.org
8985T: git git://git.code.sf.net/p/pegasus2/git 9034T: git git://github.com/petkan/rtl8150.git
8986W: http://pegasus2.sourceforge.net/ 9035W: https://github.com/petkan/rtl8150
8987S: Maintained 9036S: Maintained
8988F: drivers/net/usb/rtl8150.c 9037F: drivers/net/usb/rtl8150.c
8989 9038
@@ -9522,8 +9571,8 @@ F: drivers/xen/*swiotlb*
9522 9571
9523XFS FILESYSTEM 9572XFS FILESYSTEM
9524P: Silicon Graphics Inc 9573P: Silicon Graphics Inc
9574M: Dave Chinner <dchinner@fromorbit.com>
9525M: Ben Myers <bpm@sgi.com> 9575M: Ben Myers <bpm@sgi.com>
9526M: Alex Elder <elder@kernel.org>
9527M: xfs@oss.sgi.com 9576M: xfs@oss.sgi.com
9528L: xfs@oss.sgi.com 9577L: xfs@oss.sgi.com
9529W: http://oss.sgi.com/projects/xfs 9578W: http://oss.sgi.com/projects/xfs
diff --git a/Makefile b/Makefile
index 920ad07180c9..858a147fd836 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 12 2PATCHLEVEL = 13
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = 4EXTRAVERSION = -rc4
5NAME = One Giant Leap for Frogkind 5NAME = One Giant Leap for Frogkind
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 135c674eaf9e..d39dc9b95a2c 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -16,8 +16,8 @@ config ALPHA
16 select ARCH_WANT_IPC_PARSE_VERSION 16 select ARCH_WANT_IPC_PARSE_VERSION
17 select ARCH_HAVE_NMI_SAFE_CMPXCHG 17 select ARCH_HAVE_NMI_SAFE_CMPXCHG
18 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 18 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
19 select GENERIC_CLOCKEVENTS
19 select GENERIC_SMP_IDLE_THREAD 20 select GENERIC_SMP_IDLE_THREAD
20 select GENERIC_CMOS_UPDATE
21 select GENERIC_STRNCPY_FROM_USER 21 select GENERIC_STRNCPY_FROM_USER
22 select GENERIC_STRNLEN_USER 22 select GENERIC_STRNLEN_USER
23 select HAVE_MOD_ARCH_SPECIFIC 23 select HAVE_MOD_ARCH_SPECIFIC
@@ -488,6 +488,20 @@ config VGA_HOSE
488 which always have multiple hoses, and whose consoles support it. 488 which always have multiple hoses, and whose consoles support it.
489 489
490 490
491config ALPHA_QEMU
492 bool "Run under QEMU emulation"
493 depends on !ALPHA_GENERIC
494 ---help---
495 Assume the presence of special features supported by QEMU PALcode
496 that reduce the overhead of system emulation.
497
498 Generic kernels will auto-detect QEMU. But when building a
499 system-specific kernel, the assumption is that we want to
500 elimiate as many runtime tests as possible.
501
502 If unsure, say N.
503
504
491config ALPHA_SRM 505config ALPHA_SRM
492 bool "Use SRM as bootloader" if ALPHA_CABRIOLET || ALPHA_AVANTI_CH || ALPHA_EB64P || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_EB164 || ALPHA_ALCOR || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_NAUTILUS || ALPHA_NONAME 506 bool "Use SRM as bootloader" if ALPHA_CABRIOLET || ALPHA_AVANTI_CH || ALPHA_EB64P || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_EB164 || ALPHA_ALCOR || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_NAUTILUS || ALPHA_NONAME
493 depends on TTY 507 depends on TTY
@@ -572,6 +586,30 @@ config NUMA
572 Access). This option is for configuring high-end multiprocessor 586 Access). This option is for configuring high-end multiprocessor
573 server machines. If in doubt, say N. 587 server machines. If in doubt, say N.
574 588
589config ALPHA_WTINT
590 bool "Use WTINT" if ALPHA_SRM || ALPHA_GENERIC
591 default y if ALPHA_QEMU
592 default n if ALPHA_EV5 || ALPHA_EV56 || (ALPHA_EV4 && !ALPHA_LCA)
593 default n if !ALPHA_SRM && !ALPHA_GENERIC
594 default y if SMP
595 ---help---
596 The Wait for Interrupt (WTINT) PALcall attempts to place the CPU
597 to sleep until the next interrupt. This may reduce the power
598 consumed, and the heat produced by the computer. However, it has
599 the side effect of making the cycle counter unreliable as a timing
600 device across the sleep.
601
602 For emulation under QEMU, definitely say Y here, as we have other
603 mechanisms for measuring time than the cycle counter.
604
605 For EV4 (but not LCA), EV5 and EV56 systems, or for systems running
606 MILO, sleep mode is not supported so you might as well say N here.
607
608 For SMP systems we cannot use the cycle counter for timing anyway,
609 so you might as well say Y here.
610
611 If unsure, say N.
612
575config NODES_SHIFT 613config NODES_SHIFT
576 int 614 int
577 default "7" 615 default "7"
@@ -613,9 +651,41 @@ config VERBOSE_MCHECK_ON
613 651
614 Take the default (1) unless you want more control or more info. 652 Take the default (1) unless you want more control or more info.
615 653
654choice
655 prompt "Timer interrupt frequency (HZ)?"
656 default HZ_128 if ALPHA_QEMU
657 default HZ_1200 if ALPHA_RAWHIDE
658 default HZ_1024
659 ---help---
660 The frequency at which timer interrupts occur. A high frequency
661 minimizes latency, whereas a low frequency minimizes overhead of
662 process accounting. The later effect is especially significant
663 when being run under QEMU.
664
665 Note that some Alpha hardware cannot change the interrupt frequency
666 of the timer. If unsure, say 1024 (or 1200 for Rawhide).
667
668 config HZ_32
669 bool "32 Hz"
670 config HZ_64
671 bool "64 Hz"
672 config HZ_128
673 bool "128 Hz"
674 config HZ_256
675 bool "256 Hz"
676 config HZ_1024
677 bool "1024 Hz"
678 config HZ_1200
679 bool "1200 Hz"
680endchoice
681
616config HZ 682config HZ
617 int 683 int
618 default 1200 if ALPHA_RAWHIDE 684 default 32 if HZ_32
685 default 64 if HZ_64
686 default 128 if HZ_128
687 default 256 if HZ_256
688 default 1200 if HZ_1200
619 default 1024 689 default 1024
620 690
621source "drivers/pci/Kconfig" 691source "drivers/pci/Kconfig"
diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h
index 72dbf2359270..75cb3641ed2f 100644
--- a/arch/alpha/include/asm/machvec.h
+++ b/arch/alpha/include/asm/machvec.h
@@ -33,6 +33,7 @@ struct alpha_machine_vector
33 33
34 int nr_irqs; 34 int nr_irqs;
35 int rtc_port; 35 int rtc_port;
36 int rtc_boot_cpu_only;
36 unsigned int max_asn; 37 unsigned int max_asn;
37 unsigned long max_isa_dma_address; 38 unsigned long max_isa_dma_address;
38 unsigned long irq_probe_mask; 39 unsigned long irq_probe_mask;
@@ -95,9 +96,6 @@ struct alpha_machine_vector
95 96
96 struct _alpha_agp_info *(*agp_info)(void); 97 struct _alpha_agp_info *(*agp_info)(void);
97 98
98 unsigned int (*rtc_get_time)(struct rtc_time *);
99 int (*rtc_set_time)(struct rtc_time *);
100
101 const char *vector_name; 99 const char *vector_name;
102 100
103 /* NUMA information */ 101 /* NUMA information */
@@ -126,13 +124,19 @@ extern struct alpha_machine_vector alpha_mv;
126 124
127#ifdef CONFIG_ALPHA_GENERIC 125#ifdef CONFIG_ALPHA_GENERIC
128extern int alpha_using_srm; 126extern int alpha_using_srm;
127extern int alpha_using_qemu;
129#else 128#else
130#ifdef CONFIG_ALPHA_SRM 129# ifdef CONFIG_ALPHA_SRM
131#define alpha_using_srm 1 130# define alpha_using_srm 1
132#else 131# else
133#define alpha_using_srm 0 132# define alpha_using_srm 0
134#endif 133# endif
134# ifdef CONFIG_ALPHA_QEMU
135# define alpha_using_qemu 1
136# else
137# define alpha_using_qemu 0
138# endif
135#endif /* GENERIC */ 139#endif /* GENERIC */
136 140
137#endif 141#endif /* __KERNEL__ */
138#endif /* __ALPHA_MACHVEC_H */ 142#endif /* __ALPHA_MACHVEC_H */
diff --git a/arch/alpha/include/asm/pal.h b/arch/alpha/include/asm/pal.h
index 6fcd2b5b08f0..5422a47646fc 100644
--- a/arch/alpha/include/asm/pal.h
+++ b/arch/alpha/include/asm/pal.h
@@ -89,6 +89,7 @@ __CALL_PAL_W1(wrmces, unsigned long);
89__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long); 89__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
90__CALL_PAL_W1(wrusp, unsigned long); 90__CALL_PAL_W1(wrusp, unsigned long);
91__CALL_PAL_W1(wrvptptr, unsigned long); 91__CALL_PAL_W1(wrvptptr, unsigned long);
92__CALL_PAL_RW1(wtint, unsigned long, unsigned long);
92 93
93/* 94/*
94 * TB routines.. 95 * TB routines..
@@ -111,5 +112,75 @@ __CALL_PAL_W1(wrvptptr, unsigned long);
111#define tbiap() __tbi(-1, /* no second argument */) 112#define tbiap() __tbi(-1, /* no second argument */)
112#define tbia() __tbi(-2, /* no second argument */) 113#define tbia() __tbi(-2, /* no second argument */)
113 114
115/*
116 * QEMU Cserv routines..
117 */
118
119static inline unsigned long
120qemu_get_walltime(void)
121{
122 register unsigned long v0 __asm__("$0");
123 register unsigned long a0 __asm__("$16") = 3;
124
125 asm("call_pal %2 # cserve get_time"
126 : "=r"(v0), "+r"(a0)
127 : "i"(PAL_cserve)
128 : "$17", "$18", "$19", "$20", "$21");
129
130 return v0;
131}
132
133static inline unsigned long
134qemu_get_alarm(void)
135{
136 register unsigned long v0 __asm__("$0");
137 register unsigned long a0 __asm__("$16") = 4;
138
139 asm("call_pal %2 # cserve get_alarm"
140 : "=r"(v0), "+r"(a0)
141 : "i"(PAL_cserve)
142 : "$17", "$18", "$19", "$20", "$21");
143
144 return v0;
145}
146
147static inline void
148qemu_set_alarm_rel(unsigned long expire)
149{
150 register unsigned long a0 __asm__("$16") = 5;
151 register unsigned long a1 __asm__("$17") = expire;
152
153 asm volatile("call_pal %2 # cserve set_alarm_rel"
154 : "+r"(a0), "+r"(a1)
155 : "i"(PAL_cserve)
156 : "$0", "$18", "$19", "$20", "$21");
157}
158
159static inline void
160qemu_set_alarm_abs(unsigned long expire)
161{
162 register unsigned long a0 __asm__("$16") = 6;
163 register unsigned long a1 __asm__("$17") = expire;
164
165 asm volatile("call_pal %2 # cserve set_alarm_abs"
166 : "+r"(a0), "+r"(a1)
167 : "i"(PAL_cserve)
168 : "$0", "$18", "$19", "$20", "$21");
169}
170
171static inline unsigned long
172qemu_get_vmtime(void)
173{
174 register unsigned long v0 __asm__("$0");
175 register unsigned long a0 __asm__("$16") = 7;
176
177 asm("call_pal %2 # cserve get_time"
178 : "=r"(v0), "+r"(a0)
179 : "i"(PAL_cserve)
180 : "$17", "$18", "$19", "$20", "$21");
181
182 return v0;
183}
184
114#endif /* !__ASSEMBLY__ */ 185#endif /* !__ASSEMBLY__ */
115#endif /* __ALPHA_PAL_H */ 186#endif /* __ALPHA_PAL_H */
diff --git a/arch/alpha/include/asm/rtc.h b/arch/alpha/include/asm/rtc.h
index d70408d36677..f71c3b0ed360 100644
--- a/arch/alpha/include/asm/rtc.h
+++ b/arch/alpha/include/asm/rtc.h
@@ -1,12 +1 @@
1#ifndef _ALPHA_RTC_H
2#define _ALPHA_RTC_H
3
4#if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP) \
5 || defined(CONFIG_ALPHA_GENERIC)
6# define get_rtc_time alpha_mv.rtc_get_time
7# define set_rtc_time alpha_mv.rtc_set_time
8#endif
9
10#include <asm-generic/rtc.h> #include <asm-generic/rtc.h>
11
12#endif
diff --git a/arch/alpha/include/asm/string.h b/arch/alpha/include/asm/string.h
index b02b8a282940..c2911f591704 100644
--- a/arch/alpha/include/asm/string.h
+++ b/arch/alpha/include/asm/string.h
@@ -22,15 +22,27 @@ extern void * __memcpy(void *, const void *, size_t);
22 22
23#define __HAVE_ARCH_MEMSET 23#define __HAVE_ARCH_MEMSET
24extern void * __constant_c_memset(void *, unsigned long, size_t); 24extern void * __constant_c_memset(void *, unsigned long, size_t);
25extern void * ___memset(void *, int, size_t);
25extern void * __memset(void *, int, size_t); 26extern void * __memset(void *, int, size_t);
26extern void * memset(void *, int, size_t); 27extern void * memset(void *, int, size_t);
27 28
28#define memset(s, c, n) \ 29/* For gcc 3.x, we cannot have the inline function named "memset" because
29(__builtin_constant_p(c) \ 30 the __builtin_memset will attempt to resolve to the inline as well,
30 ? (__builtin_constant_p(n) && (c) == 0 \ 31 leading to a "sorry" about unimplemented recursive inlining. */
31 ? __builtin_memset((s),0,(n)) \ 32extern inline void *__memset(void *s, int c, size_t n)
32 : __constant_c_memset((s),0x0101010101010101UL*(unsigned char)(c),(n))) \ 33{
33 : __memset((s),(c),(n))) 34 if (__builtin_constant_p(c)) {
35 if (__builtin_constant_p(n)) {
36 return __builtin_memset(s, c, n);
37 } else {
38 unsigned long c8 = (c & 0xff) * 0x0101010101010101UL;
39 return __constant_c_memset(s, c8, n);
40 }
41 }
42 return ___memset(s, c, n);
43}
44
45#define memset __memset
34 46
35#define __HAVE_ARCH_STRCPY 47#define __HAVE_ARCH_STRCPY
36extern char * strcpy(char *,const char *); 48extern char * strcpy(char *,const char *);
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 52cd2a4a3ff4..453597b91f3a 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -58,8 +58,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
58#define THREAD_SIZE_ORDER 1 58#define THREAD_SIZE_ORDER 1
59#define THREAD_SIZE (2*PAGE_SIZE) 59#define THREAD_SIZE (2*PAGE_SIZE)
60 60
61#define PREEMPT_ACTIVE 0x40000000
62
63/* 61/*
64 * Thread information flags: 62 * Thread information flags:
65 * - these are process state flags and used from assembly 63 * - these are process state flags and used from assembly
diff --git a/arch/alpha/include/uapi/asm/pal.h b/arch/alpha/include/uapi/asm/pal.h
index 3c0ce08e5f59..dfc8140b9088 100644
--- a/arch/alpha/include/uapi/asm/pal.h
+++ b/arch/alpha/include/uapi/asm/pal.h
@@ -46,6 +46,7 @@
46#define PAL_rdusp 58 46#define PAL_rdusp 58
47#define PAL_whami 60 47#define PAL_whami 60
48#define PAL_retsys 61 48#define PAL_retsys 61
49#define PAL_wtint 62
49#define PAL_rti 63 50#define PAL_rti 63
50 51
51 52
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index 84ec46b38f7d..0d54650e78fc 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_PCI) += pci.o pci_iommu.o pci-sysfs.o
16obj-$(CONFIG_SRM_ENV) += srm_env.o 16obj-$(CONFIG_SRM_ENV) += srm_env.o
17obj-$(CONFIG_MODULES) += module.o 17obj-$(CONFIG_MODULES) += module.o
18obj-$(CONFIG_PERF_EVENTS) += perf_event.o 18obj-$(CONFIG_PERF_EVENTS) += perf_event.o
19obj-$(CONFIG_RTC_DRV_ALPHA) += rtc.o
19 20
20ifdef CONFIG_ALPHA_GENERIC 21ifdef CONFIG_ALPHA_GENERIC
21 22
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index 89566b346c0f..f4c7ab6f43b0 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -40,6 +40,7 @@ EXPORT_SYMBOL(strrchr);
40EXPORT_SYMBOL(memmove); 40EXPORT_SYMBOL(memmove);
41EXPORT_SYMBOL(__memcpy); 41EXPORT_SYMBOL(__memcpy);
42EXPORT_SYMBOL(__memset); 42EXPORT_SYMBOL(__memset);
43EXPORT_SYMBOL(___memset);
43EXPORT_SYMBOL(__memsetw); 44EXPORT_SYMBOL(__memsetw);
44EXPORT_SYMBOL(__constant_c_memset); 45EXPORT_SYMBOL(__constant_c_memset);
45EXPORT_SYMBOL(copy_page); 46EXPORT_SYMBOL(copy_page);
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 28e4429596f3..1c8625cb0e25 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -66,21 +66,7 @@ do_entInt(unsigned long type, unsigned long vector,
66 break; 66 break;
67 case 1: 67 case 1:
68 old_regs = set_irq_regs(regs); 68 old_regs = set_irq_regs(regs);
69#ifdef CONFIG_SMP
70 {
71 long cpu;
72
73 smp_percpu_timer_interrupt(regs);
74 cpu = smp_processor_id();
75 if (cpu != boot_cpuid) {
76 kstat_incr_irqs_this_cpu(RTC_IRQ, irq_to_desc(RTC_IRQ));
77 } else {
78 handle_irq(RTC_IRQ);
79 }
80 }
81#else
82 handle_irq(RTC_IRQ); 69 handle_irq(RTC_IRQ);
83#endif
84 set_irq_regs(old_regs); 70 set_irq_regs(old_regs);
85 return; 71 return;
86 case 2: 72 case 2:
@@ -228,7 +214,7 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr,
228 */ 214 */
229 215
230struct irqaction timer_irqaction = { 216struct irqaction timer_irqaction = {
231 .handler = timer_interrupt, 217 .handler = rtc_timer_interrupt,
232 .name = "timer", 218 .name = "timer",
233}; 219};
234 220
diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h
index 7fa62488bd16..f54bdf658cd0 100644
--- a/arch/alpha/kernel/machvec_impl.h
+++ b/arch/alpha/kernel/machvec_impl.h
@@ -43,10 +43,7 @@
43#define CAT1(x,y) x##y 43#define CAT1(x,y) x##y
44#define CAT(x,y) CAT1(x,y) 44#define CAT(x,y) CAT1(x,y)
45 45
46#define DO_DEFAULT_RTC \ 46#define DO_DEFAULT_RTC .rtc_port = 0x70
47 .rtc_port = 0x70, \
48 .rtc_get_time = common_get_rtc_time, \
49 .rtc_set_time = common_set_rtc_time
50 47
51#define DO_EV4_MMU \ 48#define DO_EV4_MMU \
52 .max_asn = EV4_MAX_ASN, \ 49 .max_asn = EV4_MAX_ASN, \
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index d821b17047e0..c52e7f0ee5f6 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -83,6 +83,8 @@ struct alpha_pmu_t {
83 long pmc_left[3]; 83 long pmc_left[3];
84 /* Subroutine for allocation of PMCs. Enforces constraints. */ 84 /* Subroutine for allocation of PMCs. Enforces constraints. */
85 int (*check_constraints)(struct perf_event **, unsigned long *, int); 85 int (*check_constraints)(struct perf_event **, unsigned long *, int);
86 /* Subroutine for checking validity of a raw event for this PMU. */
87 int (*raw_event_valid)(u64 config);
86}; 88};
87 89
88/* 90/*
@@ -203,6 +205,12 @@ success:
203} 205}
204 206
205 207
208static int ev67_raw_event_valid(u64 config)
209{
210 return config >= EV67_CYCLES && config < EV67_LAST_ET;
211};
212
213
206static const struct alpha_pmu_t ev67_pmu = { 214static const struct alpha_pmu_t ev67_pmu = {
207 .event_map = ev67_perfmon_event_map, 215 .event_map = ev67_perfmon_event_map,
208 .max_events = ARRAY_SIZE(ev67_perfmon_event_map), 216 .max_events = ARRAY_SIZE(ev67_perfmon_event_map),
@@ -211,7 +219,8 @@ static const struct alpha_pmu_t ev67_pmu = {
211 .pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0}, 219 .pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0},
212 .pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0}, 220 .pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0},
213 .pmc_left = {16, 4, 0}, 221 .pmc_left = {16, 4, 0},
214 .check_constraints = ev67_check_constraints 222 .check_constraints = ev67_check_constraints,
223 .raw_event_valid = ev67_raw_event_valid,
215}; 224};
216 225
217 226
@@ -609,7 +618,9 @@ static int __hw_perf_event_init(struct perf_event *event)
609 } else if (attr->type == PERF_TYPE_HW_CACHE) { 618 } else if (attr->type == PERF_TYPE_HW_CACHE) {
610 return -EOPNOTSUPP; 619 return -EOPNOTSUPP;
611 } else if (attr->type == PERF_TYPE_RAW) { 620 } else if (attr->type == PERF_TYPE_RAW) {
612 ev = attr->config & 0xff; 621 if (!alpha_pmu->raw_event_valid(attr->config))
622 return -EINVAL;
623 ev = attr->config;
613 } else { 624 } else {
614 return -EOPNOTSUPP; 625 return -EOPNOTSUPP;
615 } 626 }
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index f2360a74e5d5..1941a07b5811 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -46,6 +46,23 @@
46void (*pm_power_off)(void) = machine_power_off; 46void (*pm_power_off)(void) = machine_power_off;
47EXPORT_SYMBOL(pm_power_off); 47EXPORT_SYMBOL(pm_power_off);
48 48
49#ifdef CONFIG_ALPHA_WTINT
50/*
51 * Sleep the CPU.
52 * EV6, LCA45 and QEMU know how to power down, skipping N timer interrupts.
53 */
54void arch_cpu_idle(void)
55{
56 wtint(0);
57 local_irq_enable();
58}
59
60void arch_cpu_idle_dead(void)
61{
62 wtint(INT_MAX);
63}
64#endif /* ALPHA_WTINT */
65
49struct halt_info { 66struct halt_info {
50 int mode; 67 int mode;
51 char *restart_cmd; 68 char *restart_cmd;
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h
index d3e52d3fd592..da2d6ec9c370 100644
--- a/arch/alpha/kernel/proto.h
+++ b/arch/alpha/kernel/proto.h
@@ -135,17 +135,15 @@ extern void unregister_srm_console(void);
135/* smp.c */ 135/* smp.c */
136extern void setup_smp(void); 136extern void setup_smp(void);
137extern void handle_ipi(struct pt_regs *); 137extern void handle_ipi(struct pt_regs *);
138extern void smp_percpu_timer_interrupt(struct pt_regs *);
139 138
140/* bios32.c */ 139/* bios32.c */
141/* extern void reset_for_srm(void); */ 140/* extern void reset_for_srm(void); */
142 141
143/* time.c */ 142/* time.c */
144extern irqreturn_t timer_interrupt(int irq, void *dev); 143extern irqreturn_t rtc_timer_interrupt(int irq, void *dev);
144extern void init_clockevent(void);
145extern void common_init_rtc(void); 145extern void common_init_rtc(void);
146extern unsigned long est_cycle_freq; 146extern unsigned long est_cycle_freq;
147extern unsigned int common_get_rtc_time(struct rtc_time *time);
148extern int common_set_rtc_time(struct rtc_time *time);
149 147
150/* smc37c93x.c */ 148/* smc37c93x.c */
151extern void SMC93x_Init(void); 149extern void SMC93x_Init(void);
diff --git a/arch/alpha/kernel/rtc.c b/arch/alpha/kernel/rtc.c
new file mode 100644
index 000000000000..c8d284d8521f
--- /dev/null
+++ b/arch/alpha/kernel/rtc.c
@@ -0,0 +1,323 @@
1/*
2 * linux/arch/alpha/kernel/rtc.c
3 *
4 * Copyright (C) 1991, 1992, 1995, 1999, 2000 Linus Torvalds
5 *
6 * This file contains date handling.
7 */
8#include <linux/errno.h>
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/param.h>
12#include <linux/string.h>
13#include <linux/mc146818rtc.h>
14#include <linux/bcd.h>
15#include <linux/rtc.h>
16#include <linux/platform_device.h>
17
18#include <asm/rtc.h>
19
20#include "proto.h"
21
22
23/*
24 * Support for the RTC device.
25 *
26 * We don't want to use the rtc-cmos driver, because we don't want to support
27 * alarms, as that would be indistinguishable from timer interrupts.
28 *
29 * Further, generic code is really, really tied to a 1900 epoch. This is
30 * true in __get_rtc_time as well as the users of struct rtc_time e.g.
31 * rtc_tm_to_time. Thankfully all of the other epochs in use are later
32 * than 1900, and so it's easy to adjust.
33 */
34
35static unsigned long rtc_epoch;
36
37static int __init
38specifiy_epoch(char *str)
39{
40 unsigned long epoch = simple_strtoul(str, NULL, 0);
41 if (epoch < 1900)
42 printk("Ignoring invalid user specified epoch %lu\n", epoch);
43 else
44 rtc_epoch = epoch;
45 return 1;
46}
47__setup("epoch=", specifiy_epoch);
48
49static void __init
50init_rtc_epoch(void)
51{
52 int epoch, year, ctrl;
53
54 if (rtc_epoch != 0) {
55 /* The epoch was specified on the command-line. */
56 return;
57 }
58
59 /* Detect the epoch in use on this computer. */
60 ctrl = CMOS_READ(RTC_CONTROL);
61 year = CMOS_READ(RTC_YEAR);
62 if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
63 year = bcd2bin(year);
64
65 /* PC-like is standard; used for year >= 70 */
66 epoch = 1900;
67 if (year < 20) {
68 epoch = 2000;
69 } else if (year >= 20 && year < 48) {
70 /* NT epoch */
71 epoch = 1980;
72 } else if (year >= 48 && year < 70) {
73 /* Digital UNIX epoch */
74 epoch = 1952;
75 }
76 rtc_epoch = epoch;
77
78 printk(KERN_INFO "Using epoch %d for rtc year %d\n", epoch, year);
79}
80
81static int
82alpha_rtc_read_time(struct device *dev, struct rtc_time *tm)
83{
84 __get_rtc_time(tm);
85
86 /* Adjust for non-default epochs. It's easier to depend on the
87 generic __get_rtc_time and adjust the epoch here than create
88 a copy of __get_rtc_time with the edits we need. */
89 if (rtc_epoch != 1900) {
90 int year = tm->tm_year;
91 /* Undo the century adjustment made in __get_rtc_time. */
92 if (year >= 100)
93 year -= 100;
94 year += rtc_epoch - 1900;
95 /* Redo the century adjustment with the epoch in place. */
96 if (year <= 69)
97 year += 100;
98 tm->tm_year = year;
99 }
100
101 return rtc_valid_tm(tm);
102}
103
104static int
105alpha_rtc_set_time(struct device *dev, struct rtc_time *tm)
106{
107 struct rtc_time xtm;
108
109 if (rtc_epoch != 1900) {
110 xtm = *tm;
111 xtm.tm_year -= rtc_epoch - 1900;
112 tm = &xtm;
113 }
114
115 return __set_rtc_time(tm);
116}
117
118static int
119alpha_rtc_set_mmss(struct device *dev, unsigned long nowtime)
120{
121 int retval = 0;
122 int real_seconds, real_minutes, cmos_minutes;
123 unsigned char save_control, save_freq_select;
124
125 /* Note: This code only updates minutes and seconds. Comments
126 indicate this was to avoid messing with unknown time zones,
127 and with the epoch nonsense described above. In order for
128 this to work, the existing clock cannot be off by more than
129 15 minutes.
130
131 ??? This choice is may be out of date. The x86 port does
132 not have problems with timezones, and the epoch processing has
133 now been fixed in alpha_set_rtc_time.
134
135 In either case, one can always force a full rtc update with
136 the userland hwclock program, so surely 15 minute accuracy
137 is no real burden. */
138
139 /* In order to set the CMOS clock precisely, we have to be called
140 500 ms after the second nowtime has started, because when
141 nowtime is written into the registers of the CMOS clock, it will
142 jump to the next second precisely 500 ms later. Check the Motorola
143 MC146818A or Dallas DS12887 data sheet for details. */
144
145 /* irq are locally disabled here */
146 spin_lock(&rtc_lock);
147 /* Tell the clock it's being set */
148 save_control = CMOS_READ(RTC_CONTROL);
149 CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
150
151 /* Stop and reset prescaler */
152 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
153 CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
154
155 cmos_minutes = CMOS_READ(RTC_MINUTES);
156 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
157 cmos_minutes = bcd2bin(cmos_minutes);
158
159 real_seconds = nowtime % 60;
160 real_minutes = nowtime / 60;
161 if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1) {
162 /* correct for half hour time zone */
163 real_minutes += 30;
164 }
165 real_minutes %= 60;
166
167 if (abs(real_minutes - cmos_minutes) < 30) {
168 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
169 real_seconds = bin2bcd(real_seconds);
170 real_minutes = bin2bcd(real_minutes);
171 }
172 CMOS_WRITE(real_seconds,RTC_SECONDS);
173 CMOS_WRITE(real_minutes,RTC_MINUTES);
174 } else {
175 printk_once(KERN_NOTICE
176 "set_rtc_mmss: can't update from %d to %d\n",
177 cmos_minutes, real_minutes);
178 retval = -1;
179 }
180
181 /* The following flags have to be released exactly in this order,
182 * otherwise the DS12887 (popular MC146818A clone with integrated
183 * battery and quartz) will not reset the oscillator and will not
184 * update precisely 500 ms later. You won't find this mentioned in
185 * the Dallas Semiconductor data sheets, but who believes data
186 * sheets anyway ... -- Markus Kuhn
187 */
188 CMOS_WRITE(save_control, RTC_CONTROL);
189 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
190 spin_unlock(&rtc_lock);
191
192 return retval;
193}
194
195static int
196alpha_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
197{
198 switch (cmd) {
199 case RTC_EPOCH_READ:
200 return put_user(rtc_epoch, (unsigned long __user *)arg);
201 case RTC_EPOCH_SET:
202 if (arg < 1900)
203 return -EINVAL;
204 rtc_epoch = arg;
205 return 0;
206 default:
207 return -ENOIOCTLCMD;
208 }
209}
210
211static const struct rtc_class_ops alpha_rtc_ops = {
212 .read_time = alpha_rtc_read_time,
213 .set_time = alpha_rtc_set_time,
214 .set_mmss = alpha_rtc_set_mmss,
215 .ioctl = alpha_rtc_ioctl,
216};
217
218/*
219 * Similarly, except do the actual CMOS access on the boot cpu only.
220 * This requires marshalling the data across an interprocessor call.
221 */
222
223#if defined(CONFIG_SMP) && \
224 (defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_MARVEL))
225# define HAVE_REMOTE_RTC 1
226
227union remote_data {
228 struct rtc_time *tm;
229 unsigned long now;
230 long retval;
231};
232
233static void
234do_remote_read(void *data)
235{
236 union remote_data *x = data;
237 x->retval = alpha_rtc_read_time(NULL, x->tm);
238}
239
240static int
241remote_read_time(struct device *dev, struct rtc_time *tm)
242{
243 union remote_data x;
244 if (smp_processor_id() != boot_cpuid) {
245 x.tm = tm;
246 smp_call_function_single(boot_cpuid, do_remote_read, &x, 1);
247 return x.retval;
248 }
249 return alpha_rtc_read_time(NULL, tm);
250}
251
252static void
253do_remote_set(void *data)
254{
255 union remote_data *x = data;
256 x->retval = alpha_rtc_set_time(NULL, x->tm);
257}
258
259static int
260remote_set_time(struct device *dev, struct rtc_time *tm)
261{
262 union remote_data x;
263 if (smp_processor_id() != boot_cpuid) {
264 x.tm = tm;
265 smp_call_function_single(boot_cpuid, do_remote_set, &x, 1);
266 return x.retval;
267 }
268 return alpha_rtc_set_time(NULL, tm);
269}
270
271static void
272do_remote_mmss(void *data)
273{
274 union remote_data *x = data;
275 x->retval = alpha_rtc_set_mmss(NULL, x->now);
276}
277
278static int
279remote_set_mmss(struct device *dev, unsigned long now)
280{
281 union remote_data x;
282 if (smp_processor_id() != boot_cpuid) {
283 x.now = now;
284 smp_call_function_single(boot_cpuid, do_remote_mmss, &x, 1);
285 return x.retval;
286 }
287 return alpha_rtc_set_mmss(NULL, now);
288}
289
290static const struct rtc_class_ops remote_rtc_ops = {
291 .read_time = remote_read_time,
292 .set_time = remote_set_time,
293 .set_mmss = remote_set_mmss,
294 .ioctl = alpha_rtc_ioctl,
295};
296#endif
297
298static int __init
299alpha_rtc_init(void)
300{
301 const struct rtc_class_ops *ops;
302 struct platform_device *pdev;
303 struct rtc_device *rtc;
304 const char *name;
305
306 init_rtc_epoch();
307 name = "rtc-alpha";
308 ops = &alpha_rtc_ops;
309
310#ifdef HAVE_REMOTE_RTC
311 if (alpha_mv.rtc_boot_cpu_only)
312 ops = &remote_rtc_ops;
313#endif
314
315 pdev = platform_device_register_simple(name, -1, NULL, 0);
316 rtc = devm_rtc_device_register(&pdev->dev, name, ops, THIS_MODULE);
317 if (IS_ERR(rtc))
318 return PTR_ERR(rtc);
319
320 platform_set_drvdata(pdev, rtc);
321 return 0;
322}
323device_initcall(alpha_rtc_init);
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 9e3107cc5ebb..b20af76f12c1 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -115,10 +115,17 @@ unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;
115 115
116#ifdef CONFIG_ALPHA_GENERIC 116#ifdef CONFIG_ALPHA_GENERIC
117struct alpha_machine_vector alpha_mv; 117struct alpha_machine_vector alpha_mv;
118#endif
119
120#ifndef alpha_using_srm
118int alpha_using_srm; 121int alpha_using_srm;
119EXPORT_SYMBOL(alpha_using_srm); 122EXPORT_SYMBOL(alpha_using_srm);
120#endif 123#endif
121 124
125#ifndef alpha_using_qemu
126int alpha_using_qemu;
127#endif
128
122static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long, 129static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
123 unsigned long); 130 unsigned long);
124static struct alpha_machine_vector *get_sysvec_byname(const char *); 131static struct alpha_machine_vector *get_sysvec_byname(const char *);
@@ -529,11 +536,15 @@ setup_arch(char **cmdline_p)
529 atomic_notifier_chain_register(&panic_notifier_list, 536 atomic_notifier_chain_register(&panic_notifier_list,
530 &alpha_panic_block); 537 &alpha_panic_block);
531 538
532#ifdef CONFIG_ALPHA_GENERIC 539#ifndef alpha_using_srm
533 /* Assume that we've booted from SRM if we haven't booted from MILO. 540 /* Assume that we've booted from SRM if we haven't booted from MILO.
534 Detect the later by looking for "MILO" in the system serial nr. */ 541 Detect the later by looking for "MILO" in the system serial nr. */
535 alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0; 542 alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0;
536#endif 543#endif
544#ifndef alpha_using_qemu
545 /* Similarly, look for QEMU. */
546 alpha_using_qemu = strstr((const char *)hwrpb->ssn, "QEMU") != 0;
547#endif
537 548
538 /* If we are using SRM, we want to allow callbacks 549 /* If we are using SRM, we want to allow callbacks
539 as early as possible, so do this NOW, and then 550 as early as possible, so do this NOW, and then
@@ -1207,6 +1218,7 @@ show_cpuinfo(struct seq_file *f, void *slot)
1207 char *systype_name; 1218 char *systype_name;
1208 char *sysvariation_name; 1219 char *sysvariation_name;
1209 int nr_processors; 1220 int nr_processors;
1221 unsigned long timer_freq;
1210 1222
1211 cpu_index = (unsigned) (cpu->type - 1); 1223 cpu_index = (unsigned) (cpu->type - 1);
1212 cpu_name = "Unknown"; 1224 cpu_name = "Unknown";
@@ -1218,6 +1230,12 @@ show_cpuinfo(struct seq_file *f, void *slot)
1218 1230
1219 nr_processors = get_nr_processors(cpu, hwrpb->nr_processors); 1231 nr_processors = get_nr_processors(cpu, hwrpb->nr_processors);
1220 1232
1233#if CONFIG_HZ == 1024 || CONFIG_HZ == 1200
1234 timer_freq = (100UL * hwrpb->intr_freq) / 4096;
1235#else
1236 timer_freq = 100UL * CONFIG_HZ;
1237#endif
1238
1221 seq_printf(f, "cpu\t\t\t: Alpha\n" 1239 seq_printf(f, "cpu\t\t\t: Alpha\n"
1222 "cpu model\t\t: %s\n" 1240 "cpu model\t\t: %s\n"
1223 "cpu variation\t\t: %ld\n" 1241 "cpu variation\t\t: %ld\n"
@@ -1243,8 +1261,7 @@ show_cpuinfo(struct seq_file *f, void *slot)
1243 (char*)hwrpb->ssn, 1261 (char*)hwrpb->ssn,
1244 est_cycle_freq ? : hwrpb->cycle_freq, 1262 est_cycle_freq ? : hwrpb->cycle_freq,
1245 est_cycle_freq ? "est." : "", 1263 est_cycle_freq ? "est." : "",
1246 hwrpb->intr_freq / 4096, 1264 timer_freq / 100, timer_freq % 100,
1247 (100 * hwrpb->intr_freq / 4096) % 100,
1248 hwrpb->pagesize, 1265 hwrpb->pagesize,
1249 hwrpb->pa_bits, 1266 hwrpb->pa_bits,
1250 hwrpb->max_asn, 1267 hwrpb->max_asn,
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 9dbbcb3b9146..99ac36d5de4e 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -138,9 +138,11 @@ smp_callin(void)
138 138
139 /* Get our local ticker going. */ 139 /* Get our local ticker going. */
140 smp_setup_percpu_timer(cpuid); 140 smp_setup_percpu_timer(cpuid);
141 init_clockevent();
141 142
142 /* Call platform-specific callin, if specified */ 143 /* Call platform-specific callin, if specified */
143 if (alpha_mv.smp_callin) alpha_mv.smp_callin(); 144 if (alpha_mv.smp_callin)
145 alpha_mv.smp_callin();
144 146
145 /* All kernel threads share the same mm context. */ 147 /* All kernel threads share the same mm context. */
146 atomic_inc(&init_mm.mm_count); 148 atomic_inc(&init_mm.mm_count);
@@ -498,35 +500,6 @@ smp_cpus_done(unsigned int max_cpus)
498 ((bogosum + 2500) / (5000/HZ)) % 100); 500 ((bogosum + 2500) / (5000/HZ)) % 100);
499} 501}
500 502
501
502void
503smp_percpu_timer_interrupt(struct pt_regs *regs)
504{
505 struct pt_regs *old_regs;
506 int cpu = smp_processor_id();
507 unsigned long user = user_mode(regs);
508 struct cpuinfo_alpha *data = &cpu_data[cpu];
509
510 old_regs = set_irq_regs(regs);
511
512 /* Record kernel PC. */
513 profile_tick(CPU_PROFILING);
514
515 if (!--data->prof_counter) {
516 /* We need to make like a normal interrupt -- otherwise
517 timer interrupts ignore the global interrupt lock,
518 which would be a Bad Thing. */
519 irq_enter();
520
521 update_process_times(user);
522
523 data->prof_counter = data->prof_multiplier;
524
525 irq_exit();
526 }
527 set_irq_regs(old_regs);
528}
529
530int 503int
531setup_profiling_timer(unsigned int multiplier) 504setup_profiling_timer(unsigned int multiplier)
532{ 505{
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index 5a0af11b3a61..608f2a7fa0a3 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -224,8 +224,6 @@ struct alpha_machine_vector jensen_mv __initmv = {
224 .machine_check = jensen_machine_check, 224 .machine_check = jensen_machine_check,
225 .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, 225 .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
226 .rtc_port = 0x170, 226 .rtc_port = 0x170,
227 .rtc_get_time = common_get_rtc_time,
228 .rtc_set_time = common_set_rtc_time,
229 227
230 .nr_irqs = 16, 228 .nr_irqs = 16,
231 .device_interrupt = jensen_device_interrupt, 229 .device_interrupt = jensen_device_interrupt,
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index c92e389ff219..f21d61fab678 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -22,7 +22,6 @@
22#include <asm/hwrpb.h> 22#include <asm/hwrpb.h>
23#include <asm/tlbflush.h> 23#include <asm/tlbflush.h>
24#include <asm/vga.h> 24#include <asm/vga.h>
25#include <asm/rtc.h>
26 25
27#include "proto.h" 26#include "proto.h"
28#include "err_impl.h" 27#include "err_impl.h"
@@ -400,57 +399,6 @@ marvel_init_rtc(void)
400 init_rtc_irq(); 399 init_rtc_irq();
401} 400}
402 401
403struct marvel_rtc_time {
404 struct rtc_time *time;
405 int retval;
406};
407
408#ifdef CONFIG_SMP
409static void
410smp_get_rtc_time(void *data)
411{
412 struct marvel_rtc_time *mrt = data;
413 mrt->retval = __get_rtc_time(mrt->time);
414}
415
416static void
417smp_set_rtc_time(void *data)
418{
419 struct marvel_rtc_time *mrt = data;
420 mrt->retval = __set_rtc_time(mrt->time);
421}
422#endif
423
424static unsigned int
425marvel_get_rtc_time(struct rtc_time *time)
426{
427#ifdef CONFIG_SMP
428 struct marvel_rtc_time mrt;
429
430 if (smp_processor_id() != boot_cpuid) {
431 mrt.time = time;
432 smp_call_function_single(boot_cpuid, smp_get_rtc_time, &mrt, 1);
433 return mrt.retval;
434 }
435#endif
436 return __get_rtc_time(time);
437}
438
439static int
440marvel_set_rtc_time(struct rtc_time *time)
441{
442#ifdef CONFIG_SMP
443 struct marvel_rtc_time mrt;
444
445 if (smp_processor_id() != boot_cpuid) {
446 mrt.time = time;
447 smp_call_function_single(boot_cpuid, smp_set_rtc_time, &mrt, 1);
448 return mrt.retval;
449 }
450#endif
451 return __set_rtc_time(time);
452}
453
454static void 402static void
455marvel_smp_callin(void) 403marvel_smp_callin(void)
456{ 404{
@@ -492,8 +440,7 @@ struct alpha_machine_vector marvel_ev7_mv __initmv = {
492 .vector_name = "MARVEL/EV7", 440 .vector_name = "MARVEL/EV7",
493 DO_EV7_MMU, 441 DO_EV7_MMU,
494 .rtc_port = 0x70, 442 .rtc_port = 0x70,
495 .rtc_get_time = marvel_get_rtc_time, 443 .rtc_boot_cpu_only = 1,
496 .rtc_set_time = marvel_set_rtc_time,
497 DO_MARVEL_IO, 444 DO_MARVEL_IO,
498 .machine_check = marvel_machine_check, 445 .machine_check = marvel_machine_check,
499 .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, 446 .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index ea3395036556..ee39cee8064c 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -3,13 +3,7 @@
3 * 3 *
4 * Copyright (C) 1991, 1992, 1995, 1999, 2000 Linus Torvalds 4 * Copyright (C) 1991, 1992, 1995, 1999, 2000 Linus Torvalds
5 * 5 *
6 * This file contains the PC-specific time handling details: 6 * This file contains the clocksource time handling.
7 * reading the RTC at bootup, etc..
8 * 1994-07-02 Alan Modra
9 * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
10 * 1995-03-26 Markus Kuhn
11 * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
12 * precision CMOS clock update
13 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 7 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
14 * "A Kernel Model for Precision Timekeeping" by Dave Mills 8 * "A Kernel Model for Precision Timekeeping" by Dave Mills
15 * 1997-01-09 Adrian Sun 9 * 1997-01-09 Adrian Sun
@@ -21,9 +15,6 @@
21 * 1999-04-16 Thorsten Kranzkowski (dl8bcu@gmx.net) 15 * 1999-04-16 Thorsten Kranzkowski (dl8bcu@gmx.net)
22 * fixed algorithm in do_gettimeofday() for calculating the precise time 16 * fixed algorithm in do_gettimeofday() for calculating the precise time
23 * from processor cycle counter (now taking lost_ticks into account) 17 * from processor cycle counter (now taking lost_ticks into account)
24 * 2000-08-13 Jan-Benedict Glaw <jbglaw@lug-owl.de>
25 * Fixed time_init to be aware of epoches != 1900. This prevents
26 * booting up in 2048 for me;) Code is stolen from rtc.c.
27 * 2003-06-03 R. Scott Bailey <scott.bailey@eds.com> 18 * 2003-06-03 R. Scott Bailey <scott.bailey@eds.com>
28 * Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM 19 * Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM
29 */ 20 */
@@ -46,40 +37,19 @@
46#include <asm/uaccess.h> 37#include <asm/uaccess.h>
47#include <asm/io.h> 38#include <asm/io.h>
48#include <asm/hwrpb.h> 39#include <asm/hwrpb.h>
49#include <asm/rtc.h>
50 40
51#include <linux/mc146818rtc.h> 41#include <linux/mc146818rtc.h>
52#include <linux/time.h> 42#include <linux/time.h>
53#include <linux/timex.h> 43#include <linux/timex.h>
54#include <linux/clocksource.h> 44#include <linux/clocksource.h>
45#include <linux/clockchips.h>
55 46
56#include "proto.h" 47#include "proto.h"
57#include "irq_impl.h" 48#include "irq_impl.h"
58 49
59static int set_rtc_mmss(unsigned long);
60
61DEFINE_SPINLOCK(rtc_lock); 50DEFINE_SPINLOCK(rtc_lock);
62EXPORT_SYMBOL(rtc_lock); 51EXPORT_SYMBOL(rtc_lock);
63 52
64#define TICK_SIZE (tick_nsec / 1000)
65
66/*
67 * Shift amount by which scaled_ticks_per_cycle is scaled. Shifting
68 * by 48 gives us 16 bits for HZ while keeping the accuracy good even
69 * for large CPU clock rates.
70 */
71#define FIX_SHIFT 48
72
73/* lump static variables together for more efficient access: */
74static struct {
75 /* cycle counter last time it got invoked */
76 __u32 last_time;
77 /* ticks/cycle * 2^48 */
78 unsigned long scaled_ticks_per_cycle;
79 /* partial unused tick */
80 unsigned long partial_tick;
81} state;
82
83unsigned long est_cycle_freq; 53unsigned long est_cycle_freq;
84 54
85#ifdef CONFIG_IRQ_WORK 55#ifdef CONFIG_IRQ_WORK
@@ -108,109 +78,156 @@ static inline __u32 rpcc(void)
108 return __builtin_alpha_rpcc(); 78 return __builtin_alpha_rpcc();
109} 79}
110 80
111int update_persistent_clock(struct timespec now)
112{
113 return set_rtc_mmss(now.tv_sec);
114}
115 81
116void read_persistent_clock(struct timespec *ts) 82
83/*
84 * The RTC as a clock_event_device primitive.
85 */
86
87static DEFINE_PER_CPU(struct clock_event_device, cpu_ce);
88
89irqreturn_t
90rtc_timer_interrupt(int irq, void *dev)
117{ 91{
118 unsigned int year, mon, day, hour, min, sec, epoch; 92 int cpu = smp_processor_id();
119 93 struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
120 sec = CMOS_READ(RTC_SECONDS);
121 min = CMOS_READ(RTC_MINUTES);
122 hour = CMOS_READ(RTC_HOURS);
123 day = CMOS_READ(RTC_DAY_OF_MONTH);
124 mon = CMOS_READ(RTC_MONTH);
125 year = CMOS_READ(RTC_YEAR);
126
127 if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
128 sec = bcd2bin(sec);
129 min = bcd2bin(min);
130 hour = bcd2bin(hour);
131 day = bcd2bin(day);
132 mon = bcd2bin(mon);
133 year = bcd2bin(year);
134 }
135 94
136 /* PC-like is standard; used for year >= 70 */ 95 /* Don't run the hook for UNUSED or SHUTDOWN. */
137 epoch = 1900; 96 if (likely(ce->mode == CLOCK_EVT_MODE_PERIODIC))
138 if (year < 20) 97 ce->event_handler(ce);
139 epoch = 2000;
140 else if (year >= 20 && year < 48)
141 /* NT epoch */
142 epoch = 1980;
143 else if (year >= 48 && year < 70)
144 /* Digital UNIX epoch */
145 epoch = 1952;
146 98
147 printk(KERN_INFO "Using epoch = %d\n", epoch); 99 if (test_irq_work_pending()) {
100 clear_irq_work_pending();
101 irq_work_run();
102 }
148 103
149 if ((year += epoch) < 1970) 104 return IRQ_HANDLED;
150 year += 100; 105}
151 106
152 ts->tv_sec = mktime(year, mon, day, hour, min, sec); 107static void
153 ts->tv_nsec = 0; 108rtc_ce_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
109{
110 /* The mode member of CE is updated in generic code.
111 Since we only support periodic events, nothing to do. */
112}
113
114static int
115rtc_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
116{
117 /* This hook is for oneshot mode, which we don't support. */
118 return -EINVAL;
154} 119}
155 120
121static void __init
122init_rtc_clockevent(void)
123{
124 int cpu = smp_processor_id();
125 struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
126
127 *ce = (struct clock_event_device){
128 .name = "rtc",
129 .features = CLOCK_EVT_FEAT_PERIODIC,
130 .rating = 100,
131 .cpumask = cpumask_of(cpu),
132 .set_mode = rtc_ce_set_mode,
133 .set_next_event = rtc_ce_set_next_event,
134 };
156 135
136 clockevents_config_and_register(ce, CONFIG_HZ, 0, 0);
137}
157 138
139
158/* 140/*
159 * timer_interrupt() needs to keep up the real-time clock, 141 * The QEMU clock as a clocksource primitive.
160 * as well as call the "xtime_update()" routine every clocktick
161 */ 142 */
162irqreturn_t timer_interrupt(int irq, void *dev) 143
144static cycle_t
145qemu_cs_read(struct clocksource *cs)
163{ 146{
164 unsigned long delta; 147 return qemu_get_vmtime();
165 __u32 now; 148}
166 long nticks;
167 149
168#ifndef CONFIG_SMP 150static struct clocksource qemu_cs = {
169 /* Not SMP, do kernel PC profiling here. */ 151 .name = "qemu",
170 profile_tick(CPU_PROFILING); 152 .rating = 400,
171#endif 153 .read = qemu_cs_read,
154 .mask = CLOCKSOURCE_MASK(64),
155 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
156 .max_idle_ns = LONG_MAX
157};
172 158
173 /*
174 * Calculate how many ticks have passed since the last update,
175 * including any previous partial leftover. Save any resulting
176 * fraction for the next pass.
177 */
178 now = rpcc();
179 delta = now - state.last_time;
180 state.last_time = now;
181 delta = delta * state.scaled_ticks_per_cycle + state.partial_tick;
182 state.partial_tick = delta & ((1UL << FIX_SHIFT) - 1);
183 nticks = delta >> FIX_SHIFT;
184 159
185 if (nticks) 160/*
186 xtime_update(nticks); 161 * The QEMU alarm as a clock_event_device primitive.
162 */
187 163
188 if (test_irq_work_pending()) { 164static void
189 clear_irq_work_pending(); 165qemu_ce_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
190 irq_work_run(); 166{
191 } 167 /* The mode member of CE is updated for us in generic code.
168 Just make sure that the event is disabled. */
169 qemu_set_alarm_abs(0);
170}
192 171
193#ifndef CONFIG_SMP 172static int
194 while (nticks--) 173qemu_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
195 update_process_times(user_mode(get_irq_regs())); 174{
196#endif 175 qemu_set_alarm_rel(evt);
176 return 0;
177}
197 178
179static irqreturn_t
180qemu_timer_interrupt(int irq, void *dev)
181{
182 int cpu = smp_processor_id();
183 struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
184
185 ce->event_handler(ce);
198 return IRQ_HANDLED; 186 return IRQ_HANDLED;
199} 187}
200 188
189static void __init
190init_qemu_clockevent(void)
191{
192 int cpu = smp_processor_id();
193 struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
194
195 *ce = (struct clock_event_device){
196 .name = "qemu",
197 .features = CLOCK_EVT_FEAT_ONESHOT,
198 .rating = 400,
199 .cpumask = cpumask_of(cpu),
200 .set_mode = qemu_ce_set_mode,
201 .set_next_event = qemu_ce_set_next_event,
202 };
203
204 clockevents_config_and_register(ce, NSEC_PER_SEC, 1000, LONG_MAX);
205}
206
207
201void __init 208void __init
202common_init_rtc(void) 209common_init_rtc(void)
203{ 210{
204 unsigned char x; 211 unsigned char x, sel = 0;
205 212
206 /* Reset periodic interrupt frequency. */ 213 /* Reset periodic interrupt frequency. */
207 x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f; 214#if CONFIG_HZ == 1024 || CONFIG_HZ == 1200
208 /* Test includes known working values on various platforms 215 x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f;
209 where 0x26 is wrong; we refuse to change those. */ 216 /* Test includes known working values on various platforms
210 if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) { 217 where 0x26 is wrong; we refuse to change those. */
211 printk("Setting RTC_FREQ to 1024 Hz (%x)\n", x); 218 if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) {
212 CMOS_WRITE(0x26, RTC_FREQ_SELECT); 219 sel = RTC_REF_CLCK_32KHZ + 6;
213 } 220 }
221#elif CONFIG_HZ == 256 || CONFIG_HZ == 128 || CONFIG_HZ == 64 || CONFIG_HZ == 32
222 sel = RTC_REF_CLCK_32KHZ + __builtin_ffs(32768 / CONFIG_HZ);
223#else
224# error "Unknown HZ from arch/alpha/Kconfig"
225#endif
226 if (sel) {
227 printk(KERN_INFO "Setting RTC_FREQ to %d Hz (%x)\n",
228 CONFIG_HZ, sel);
229 CMOS_WRITE(sel, RTC_FREQ_SELECT);
230 }
214 231
215 /* Turn on periodic interrupts. */ 232 /* Turn on periodic interrupts. */
216 x = CMOS_READ(RTC_CONTROL); 233 x = CMOS_READ(RTC_CONTROL);
@@ -233,16 +250,37 @@ common_init_rtc(void)
233 init_rtc_irq(); 250 init_rtc_irq();
234} 251}
235 252
236unsigned int common_get_rtc_time(struct rtc_time *time) 253
237{ 254#ifndef CONFIG_ALPHA_WTINT
238 return __get_rtc_time(time); 255/*
239} 256 * The RPCC as a clocksource primitive.
257 *
258 * While we have free-running timecounters running on all CPUs, and we make
259 * a half-hearted attempt in init_rtc_rpcc_info to sync the timecounter
260 * with the wall clock, that initialization isn't kept up-to-date across
261 * different time counters in SMP mode. Therefore we can only use this
262 * method when there's only one CPU enabled.
263 *
264 * When using the WTINT PALcall, the RPCC may shift to a lower frequency,
265 * or stop altogether, while waiting for the interrupt. Therefore we cannot
266 * use this method when WTINT is in use.
267 */
240 268
241int common_set_rtc_time(struct rtc_time *time) 269static cycle_t read_rpcc(struct clocksource *cs)
242{ 270{
243 return __set_rtc_time(time); 271 return rpcc();
244} 272}
245 273
274static struct clocksource clocksource_rpcc = {
275 .name = "rpcc",
276 .rating = 300,
277 .read = read_rpcc,
278 .mask = CLOCKSOURCE_MASK(32),
279 .flags = CLOCK_SOURCE_IS_CONTINUOUS
280};
281#endif /* ALPHA_WTINT */
282
283
246/* Validate a computed cycle counter result against the known bounds for 284/* Validate a computed cycle counter result against the known bounds for
247 the given processor core. There's too much brokenness in the way of 285 the given processor core. There's too much brokenness in the way of
248 timing hardware for any one method to work everywhere. :-( 286 timing hardware for any one method to work everywhere. :-(
@@ -353,33 +391,6 @@ rpcc_after_update_in_progress(void)
353 return rpcc(); 391 return rpcc();
354} 392}
355 393
356#ifndef CONFIG_SMP
357/* Until and unless we figure out how to get cpu cycle counters
358 in sync and keep them there, we can't use the rpcc. */
359static cycle_t read_rpcc(struct clocksource *cs)
360{
361 cycle_t ret = (cycle_t)rpcc();
362 return ret;
363}
364
365static struct clocksource clocksource_rpcc = {
366 .name = "rpcc",
367 .rating = 300,
368 .read = read_rpcc,
369 .mask = CLOCKSOURCE_MASK(32),
370 .flags = CLOCK_SOURCE_IS_CONTINUOUS
371};
372
373static inline void register_rpcc_clocksource(long cycle_freq)
374{
375 clocksource_register_hz(&clocksource_rpcc, cycle_freq);
376}
377#else /* !CONFIG_SMP */
378static inline void register_rpcc_clocksource(long cycle_freq)
379{
380}
381#endif /* !CONFIG_SMP */
382
383void __init 394void __init
384time_init(void) 395time_init(void)
385{ 396{
@@ -387,6 +398,15 @@ time_init(void)
387 unsigned long cycle_freq, tolerance; 398 unsigned long cycle_freq, tolerance;
388 long diff; 399 long diff;
389 400
401 if (alpha_using_qemu) {
402 clocksource_register_hz(&qemu_cs, NSEC_PER_SEC);
403 init_qemu_clockevent();
404
405 timer_irqaction.handler = qemu_timer_interrupt;
406 init_rtc_irq();
407 return;
408 }
409
390 /* Calibrate CPU clock -- attempt #1. */ 410 /* Calibrate CPU clock -- attempt #1. */
391 if (!est_cycle_freq) 411 if (!est_cycle_freq)
392 est_cycle_freq = validate_cc_value(calibrate_cc_with_pit()); 412 est_cycle_freq = validate_cc_value(calibrate_cc_with_pit());
@@ -421,100 +441,25 @@ time_init(void)
421 "and unable to estimate a proper value!\n"); 441 "and unable to estimate a proper value!\n");
422 } 442 }
423 443
424 /* From John Bowman <bowman@math.ualberta.ca>: allow the values 444 /* See above for restrictions on using clocksource_rpcc. */
425 to settle, as the Update-In-Progress bit going low isn't good 445#ifndef CONFIG_ALPHA_WTINT
426 enough on some hardware. 2ms is our guess; we haven't found 446 if (hwrpb->nr_processors == 1)
427 bogomips yet, but this is close on a 500Mhz box. */ 447 clocksource_register_hz(&clocksource_rpcc, cycle_freq);
428 __delay(1000000); 448#endif
429
430
431 if (HZ > (1<<16)) {
432 extern void __you_loose (void);
433 __you_loose();
434 }
435
436 register_rpcc_clocksource(cycle_freq);
437
438 state.last_time = cc1;
439 state.scaled_ticks_per_cycle
440 = ((unsigned long) HZ << FIX_SHIFT) / cycle_freq;
441 state.partial_tick = 0L;
442 449
443 /* Startup the timer source. */ 450 /* Startup the timer source. */
444 alpha_mv.init_rtc(); 451 alpha_mv.init_rtc();
452 init_rtc_clockevent();
445} 453}
446 454
447/* 455/* Initialize the clock_event_device for secondary cpus. */
448 * In order to set the CMOS clock precisely, set_rtc_mmss has to be 456#ifdef CONFIG_SMP
449 * called 500 ms after the second nowtime has started, because when 457void __init
450 * nowtime is written into the registers of the CMOS clock, it will 458init_clockevent(void)
451 * jump to the next second precisely 500 ms later. Check the Motorola
452 * MC146818A or Dallas DS12887 data sheet for details.
453 *
454 * BUG: This routine does not handle hour overflow properly; it just
455 * sets the minutes. Usually you won't notice until after reboot!
456 */
457
458
459static int
460set_rtc_mmss(unsigned long nowtime)
461{ 459{
462 int retval = 0; 460 if (alpha_using_qemu)
463 int real_seconds, real_minutes, cmos_minutes; 461 init_qemu_clockevent();
464 unsigned char save_control, save_freq_select; 462 else
465 463 init_rtc_clockevent();
466 /* irq are locally disabled here */
467 spin_lock(&rtc_lock);
468 /* Tell the clock it's being set */
469 save_control = CMOS_READ(RTC_CONTROL);
470 CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
471
472 /* Stop and reset prescaler */
473 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
474 CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
475
476 cmos_minutes = CMOS_READ(RTC_MINUTES);
477 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
478 cmos_minutes = bcd2bin(cmos_minutes);
479
480 /*
481 * since we're only adjusting minutes and seconds,
482 * don't interfere with hour overflow. This avoids
483 * messing with unknown time zones but requires your
484 * RTC not to be off by more than 15 minutes
485 */
486 real_seconds = nowtime % 60;
487 real_minutes = nowtime / 60;
488 if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) {
489 /* correct for half hour time zone */
490 real_minutes += 30;
491 }
492 real_minutes %= 60;
493
494 if (abs(real_minutes - cmos_minutes) < 30) {
495 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
496 real_seconds = bin2bcd(real_seconds);
497 real_minutes = bin2bcd(real_minutes);
498 }
499 CMOS_WRITE(real_seconds,RTC_SECONDS);
500 CMOS_WRITE(real_minutes,RTC_MINUTES);
501 } else {
502 printk_once(KERN_NOTICE
503 "set_rtc_mmss: can't update from %d to %d\n",
504 cmos_minutes, real_minutes);
505 retval = -1;
506 }
507
508 /* The following flags have to be released exactly in this order,
509 * otherwise the DS12887 (popular MC146818A clone with integrated
510 * battery and quartz) will not reset the oscillator and will not
511 * update precisely 500 ms later. You won't find this mentioned in
512 * the Dallas Semiconductor data sheets, but who believes data
513 * sheets anyway ... -- Markus Kuhn
514 */
515 CMOS_WRITE(save_control, RTC_CONTROL);
516 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
517 spin_unlock(&rtc_lock);
518
519 return retval;
520} 464}
465#endif
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index bd0665cdc840..9c4c189eb22f 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -241,6 +241,21 @@ do_entIF(unsigned long type, struct pt_regs *regs)
241 (const char *)(data[1] | (long)data[2] << 32), 241 (const char *)(data[1] | (long)data[2] << 32),
242 data[0]); 242 data[0]);
243 } 243 }
244#ifdef CONFIG_ALPHA_WTINT
245 if (type == 4) {
246 /* If CALL_PAL WTINT is totally unsupported by the
247 PALcode, e.g. MILO, "emulate" it by overwriting
248 the insn. */
249 unsigned int *pinsn
250 = (unsigned int *) regs->pc - 1;
251 if (*pinsn == PAL_wtint) {
252 *pinsn = 0x47e01400; /* mov 0,$0 */
253 imb();
254 regs->r0 = 0;
255 return;
256 }
257 }
258#endif /* ALPHA_WTINT */
244 die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"), 259 die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
245 regs, type, NULL); 260 regs, type, NULL);
246 } 261 }
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index ffb19b7da999..ff3c10721caf 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -130,7 +130,7 @@ csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
130 *dst = word | tmp; 130 *dst = word | tmp;
131 checksum += carry; 131 checksum += carry;
132 } 132 }
133 if (err) *errp = err; 133 if (err && errp) *errp = err;
134 return checksum; 134 return checksum;
135} 135}
136 136
@@ -185,7 +185,7 @@ csum_partial_cfu_dest_aligned(const unsigned long __user *src,
185 *dst = word | tmp; 185 *dst = word | tmp;
186 checksum += carry; 186 checksum += carry;
187 } 187 }
188 if (err) *errp = err; 188 if (err && errp) *errp = err;
189 return checksum; 189 return checksum;
190} 190}
191 191
@@ -242,7 +242,7 @@ csum_partial_cfu_src_aligned(const unsigned long __user *src,
242 stq_u(partial_dest | second_dest, dst); 242 stq_u(partial_dest | second_dest, dst);
243out: 243out:
244 checksum += carry; 244 checksum += carry;
245 if (err) *errp = err; 245 if (err && errp) *errp = err;
246 return checksum; 246 return checksum;
247} 247}
248 248
@@ -325,7 +325,7 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
325 stq_u(partial_dest | word | second_dest, dst); 325 stq_u(partial_dest | word | second_dest, dst);
326 checksum += carry; 326 checksum += carry;
327 } 327 }
328 if (err) *errp = err; 328 if (err && errp) *errp = err;
329 return checksum; 329 return checksum;
330} 330}
331 331
@@ -339,7 +339,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len,
339 339
340 if (len) { 340 if (len) {
341 if (!access_ok(VERIFY_READ, src, len)) { 341 if (!access_ok(VERIFY_READ, src, len)) {
342 *errp = -EFAULT; 342 if (errp) *errp = -EFAULT;
343 memset(dst, 0, len); 343 memset(dst, 0, len);
344 return sum; 344 return sum;
345 } 345 }
diff --git a/arch/alpha/lib/ev6-memset.S b/arch/alpha/lib/ev6-memset.S
index d8b94e1c7fca..356bb2fdd705 100644
--- a/arch/alpha/lib/ev6-memset.S
+++ b/arch/alpha/lib/ev6-memset.S
@@ -30,14 +30,15 @@
30 .set noat 30 .set noat
31 .set noreorder 31 .set noreorder
32.text 32.text
33 .globl memset
33 .globl __memset 34 .globl __memset
35 .globl ___memset
34 .globl __memsetw 36 .globl __memsetw
35 .globl __constant_c_memset 37 .globl __constant_c_memset
36 .globl memset
37 38
38 .ent __memset 39 .ent ___memset
39.align 5 40.align 5
40__memset: 41___memset:
41 .frame $30,0,$26,0 42 .frame $30,0,$26,0
42 .prologue 0 43 .prologue 0
43 44
@@ -227,7 +228,7 @@ end_b:
227 nop 228 nop
228 nop 229 nop
229 ret $31,($26),1 # L0 : 230 ret $31,($26),1 # L0 :
230 .end __memset 231 .end ___memset
231 232
232 /* 233 /*
233 * This is the original body of code, prior to replication and 234 * This is the original body of code, prior to replication and
@@ -594,4 +595,5 @@ end_w:
594 595
595 .end __memsetw 596 .end __memsetw
596 597
597memset = __memset 598memset = ___memset
599__memset = ___memset
diff --git a/arch/alpha/lib/memset.S b/arch/alpha/lib/memset.S
index 311b8cfc6914..76ccc6d1f364 100644
--- a/arch/alpha/lib/memset.S
+++ b/arch/alpha/lib/memset.S
@@ -19,11 +19,13 @@
19.text 19.text
20 .globl memset 20 .globl memset
21 .globl __memset 21 .globl __memset
22 .globl ___memset
22 .globl __memsetw 23 .globl __memsetw
23 .globl __constant_c_memset 24 .globl __constant_c_memset
24 .ent __memset 25
26 .ent ___memset
25.align 5 27.align 5
26__memset: 28___memset:
27 .frame $30,0,$26,0 29 .frame $30,0,$26,0
28 .prologue 0 30 .prologue 0
29 31
@@ -103,7 +105,7 @@ within_one_quad:
103 105
104end: 106end:
105 ret $31,($26),1 /* E1 */ 107 ret $31,($26),1 /* E1 */
106 .end __memset 108 .end ___memset
107 109
108 .align 5 110 .align 5
109 .ent __memsetw 111 .ent __memsetw
@@ -121,4 +123,5 @@ __memsetw:
121 123
122 .end __memsetw 124 .end __memsetw
123 125
124memset = __memset 126memset = ___memset
127__memset = ___memset
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 2ee0c9bfd032..9063ae6553cc 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -8,6 +8,7 @@
8 8
9config ARC 9config ARC
10 def_bool y 10 def_bool y
11 select BUILDTIME_EXTABLE_SORT
11 select CLONE_BACKWARDS 12 select CLONE_BACKWARDS
12 # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev 13 # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev
13 select DEVTMPFS if !INITRAMFS_SOURCE="" 14 select DEVTMPFS if !INITRAMFS_SOURCE=""
diff --git a/arch/arc/boot/dts/abilis_tb100.dtsi b/arch/arc/boot/dts/abilis_tb100.dtsi
index d9f8249aa66e..3942634f805a 100644
--- a/arch/arc/boot/dts/abilis_tb100.dtsi
+++ b/arch/arc/boot/dts/abilis_tb100.dtsi
@@ -43,124 +43,124 @@
43 iomux: iomux@FF10601c { 43 iomux: iomux@FF10601c {
44 /* Port 1 */ 44 /* Port 1 */
45 pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */ 45 pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */
46 pingrp = "mis0_pins"; 46 abilis,function = "mis0";
47 }; 47 };
48 pctl_tsin_s1: pctl-tsin-s1 { /* Serial TS-in 1 */ 48 pctl_tsin_s1: pctl-tsin-s1 { /* Serial TS-in 1 */
49 pingrp = "mis1_pins"; 49 abilis,function = "mis1";
50 }; 50 };
51 pctl_gpio_a: pctl-gpio-a { /* GPIO bank A */ 51 pctl_gpio_a: pctl-gpio-a { /* GPIO bank A */
52 pingrp = "gpioa_pins"; 52 abilis,function = "gpioa";
53 }; 53 };
54 pctl_tsin_p1: pctl-tsin-p1 { /* Parallel TS-in 1 */ 54 pctl_tsin_p1: pctl-tsin-p1 { /* Parallel TS-in 1 */
55 pingrp = "mip1_pins"; 55 abilis,function = "mip1";
56 }; 56 };
57 /* Port 2 */ 57 /* Port 2 */
58 pctl_tsin_s2: pctl-tsin-s2 { /* Serial TS-in 2 */ 58 pctl_tsin_s2: pctl-tsin-s2 { /* Serial TS-in 2 */
59 pingrp = "mis2_pins"; 59 abilis,function = "mis2";
60 }; 60 };
61 pctl_tsin_s3: pctl-tsin-s3 { /* Serial TS-in 3 */ 61 pctl_tsin_s3: pctl-tsin-s3 { /* Serial TS-in 3 */
62 pingrp = "mis3_pins"; 62 abilis,function = "mis3";
63 }; 63 };
64 pctl_gpio_c: pctl-gpio-c { /* GPIO bank C */ 64 pctl_gpio_c: pctl-gpio-c { /* GPIO bank C */
65 pingrp = "gpioc_pins"; 65 abilis,function = "gpioc";
66 }; 66 };
67 pctl_tsin_p3: pctl-tsin-p3 { /* Parallel TS-in 3 */ 67 pctl_tsin_p3: pctl-tsin-p3 { /* Parallel TS-in 3 */
68 pingrp = "mip3_pins"; 68 abilis,function = "mip3";
69 }; 69 };
70 /* Port 3 */ 70 /* Port 3 */
71 pctl_tsin_s4: pctl-tsin-s4 { /* Serial TS-in 4 */ 71 pctl_tsin_s4: pctl-tsin-s4 { /* Serial TS-in 4 */
72 pingrp = "mis4_pins"; 72 abilis,function = "mis4";
73 }; 73 };
74 pctl_tsin_s5: pctl-tsin-s5 { /* Serial TS-in 5 */ 74 pctl_tsin_s5: pctl-tsin-s5 { /* Serial TS-in 5 */
75 pingrp = "mis5_pins"; 75 abilis,function = "mis5";
76 }; 76 };
77 pctl_gpio_e: pctl-gpio-e { /* GPIO bank E */ 77 pctl_gpio_e: pctl-gpio-e { /* GPIO bank E */
78 pingrp = "gpioe_pins"; 78 abilis,function = "gpioe";
79 }; 79 };
80 pctl_tsin_p5: pctl-tsin-p5 { /* Parallel TS-in 5 */ 80 pctl_tsin_p5: pctl-tsin-p5 { /* Parallel TS-in 5 */
81 pingrp = "mip5_pins"; 81 abilis,function = "mip5";
82 }; 82 };
83 /* Port 4 */ 83 /* Port 4 */
84 pctl_tsin_s6: pctl-tsin-s6 { /* Serial TS-in 6 */ 84 pctl_tsin_s6: pctl-tsin-s6 { /* Serial TS-in 6 */
85 pingrp = "mis6_pins"; 85 abilis,function = "mis6";
86 }; 86 };
87 pctl_tsin_s7: pctl-tsin-s7 { /* Serial TS-in 7 */ 87 pctl_tsin_s7: pctl-tsin-s7 { /* Serial TS-in 7 */
88 pingrp = "mis7_pins"; 88 abilis,function = "mis7";
89 }; 89 };
90 pctl_gpio_g: pctl-gpio-g { /* GPIO bank G */ 90 pctl_gpio_g: pctl-gpio-g { /* GPIO bank G */
91 pingrp = "gpiog_pins"; 91 abilis,function = "gpiog";
92 }; 92 };
93 pctl_tsin_p7: pctl-tsin-p7 { /* Parallel TS-in 7 */ 93 pctl_tsin_p7: pctl-tsin-p7 { /* Parallel TS-in 7 */
94 pingrp = "mip7_pins"; 94 abilis,function = "mip7";
95 }; 95 };
96 /* Port 5 */ 96 /* Port 5 */
97 pctl_gpio_j: pctl-gpio-j { /* GPIO bank J */ 97 pctl_gpio_j: pctl-gpio-j { /* GPIO bank J */
98 pingrp = "gpioj_pins"; 98 abilis,function = "gpioj";
99 }; 99 };
100 pctl_gpio_k: pctl-gpio-k { /* GPIO bank K */ 100 pctl_gpio_k: pctl-gpio-k { /* GPIO bank K */
101 pingrp = "gpiok_pins"; 101 abilis,function = "gpiok";
102 }; 102 };
103 pctl_ciplus: pctl-ciplus { /* CI+ interface */ 103 pctl_ciplus: pctl-ciplus { /* CI+ interface */
104 pingrp = "ciplus_pins"; 104 abilis,function = "ciplus";
105 }; 105 };
106 pctl_mcard: pctl-mcard { /* M-Card interface */ 106 pctl_mcard: pctl-mcard { /* M-Card interface */
107 pingrp = "mcard_pins"; 107 abilis,function = "mcard";
108 }; 108 };
109 /* Port 6 */ 109 /* Port 6 */
110 pctl_tsout_p: pctl-tsout-p { /* Parallel TS-out */ 110 pctl_tsout_p: pctl-tsout-p { /* Parallel TS-out */
111 pingrp = "mop_pins"; 111 abilis,function = "mop";
112 }; 112 };
113 pctl_tsout_s0: pctl-tsout-s0 { /* Serial TS-out 0 */ 113 pctl_tsout_s0: pctl-tsout-s0 { /* Serial TS-out 0 */
114 pingrp = "mos0_pins"; 114 abilis,function = "mos0";
115 }; 115 };
116 pctl_tsout_s1: pctl-tsout-s1 { /* Serial TS-out 1 */ 116 pctl_tsout_s1: pctl-tsout-s1 { /* Serial TS-out 1 */
117 pingrp = "mos1_pins"; 117 abilis,function = "mos1";
118 }; 118 };
119 pctl_tsout_s2: pctl-tsout-s2 { /* Serial TS-out 2 */ 119 pctl_tsout_s2: pctl-tsout-s2 { /* Serial TS-out 2 */
120 pingrp = "mos2_pins"; 120 abilis,function = "mos2";
121 }; 121 };
122 pctl_tsout_s3: pctl-tsout-s3 { /* Serial TS-out 3 */ 122 pctl_tsout_s3: pctl-tsout-s3 { /* Serial TS-out 3 */
123 pingrp = "mos3_pins"; 123 abilis,function = "mos3";
124 }; 124 };
125 /* Port 7 */ 125 /* Port 7 */
126 pctl_uart0: pctl-uart0 { /* UART 0 */ 126 pctl_uart0: pctl-uart0 { /* UART 0 */
127 pingrp = "uart0_pins"; 127 abilis,function = "uart0";
128 }; 128 };
129 pctl_uart1: pctl-uart1 { /* UART 1 */ 129 pctl_uart1: pctl-uart1 { /* UART 1 */
130 pingrp = "uart1_pins"; 130 abilis,function = "uart1";
131 }; 131 };
132 pctl_gpio_l: pctl-gpio-l { /* GPIO bank L */ 132 pctl_gpio_l: pctl-gpio-l { /* GPIO bank L */
133 pingrp = "gpiol_pins"; 133 abilis,function = "gpiol";
134 }; 134 };
135 pctl_gpio_m: pctl-gpio-m { /* GPIO bank M */ 135 pctl_gpio_m: pctl-gpio-m { /* GPIO bank M */
136 pingrp = "gpiom_pins"; 136 abilis,function = "gpiom";
137 }; 137 };
138 /* Port 8 */ 138 /* Port 8 */
139 pctl_spi3: pctl-spi3 { 139 pctl_spi3: pctl-spi3 {
140 pingrp = "spi3_pins"; 140 abilis,function = "spi3";
141 }; 141 };
142 /* Port 9 */ 142 /* Port 9 */
143 pctl_spi1: pctl-spi1 { 143 pctl_spi1: pctl-spi1 {
144 pingrp = "spi1_pins"; 144 abilis,function = "spi1";
145 }; 145 };
146 pctl_gpio_n: pctl-gpio-n { 146 pctl_gpio_n: pctl-gpio-n {
147 pingrp = "gpion_pins"; 147 abilis,function = "gpion";
148 }; 148 };
149 /* Unmuxed GPIOs */ 149 /* Unmuxed GPIOs */
150 pctl_gpio_b: pctl-gpio-b { 150 pctl_gpio_b: pctl-gpio-b {
151 pingrp = "gpiob_pins"; 151 abilis,function = "gpiob";
152 }; 152 };
153 pctl_gpio_d: pctl-gpio-d { 153 pctl_gpio_d: pctl-gpio-d {
154 pingrp = "gpiod_pins"; 154 abilis,function = "gpiod";
155 }; 155 };
156 pctl_gpio_f: pctl-gpio-f { 156 pctl_gpio_f: pctl-gpio-f {
157 pingrp = "gpiof_pins"; 157 abilis,function = "gpiof";
158 }; 158 };
159 pctl_gpio_h: pctl-gpio-h { 159 pctl_gpio_h: pctl-gpio-h {
160 pingrp = "gpioh_pins"; 160 abilis,function = "gpioh";
161 }; 161 };
162 pctl_gpio_i: pctl-gpio-i { 162 pctl_gpio_i: pctl-gpio-i {
163 pingrp = "gpioi_pins"; 163 abilis,function = "gpioi";
164 }; 164 };
165 }; 165 };
166 166
@@ -172,9 +172,10 @@
172 interrupts = <27 2>; 172 interrupts = <27 2>;
173 reg = <0xFF140000 0x1000>; 173 reg = <0xFF140000 0x1000>;
174 gpio-controller; 174 gpio-controller;
175 #gpio-cells = <1>; 175 #gpio-cells = <2>;
176 gpio-base = <0>; 176 abilis,ngpio = <3>;
177 gpio-pins = <&pctl_gpio_a>; 177 gpio-ranges = <&iomux 0 0 0>;
178 gpio-ranges-group-names = "gpioa";
178 }; 179 };
179 gpiob: gpio@FF141000 { 180 gpiob: gpio@FF141000 {
180 compatible = "abilis,tb10x-gpio"; 181 compatible = "abilis,tb10x-gpio";
@@ -184,9 +185,10 @@
184 interrupts = <27 2>; 185 interrupts = <27 2>;
185 reg = <0xFF141000 0x1000>; 186 reg = <0xFF141000 0x1000>;
186 gpio-controller; 187 gpio-controller;
187 #gpio-cells = <1>; 188 #gpio-cells = <2>;
188 gpio-base = <3>; 189 abilis,ngpio = <2>;
189 gpio-pins = <&pctl_gpio_b>; 190 gpio-ranges = <&iomux 0 0 0>;
191 gpio-ranges-group-names = "gpiob";
190 }; 192 };
191 gpioc: gpio@FF142000 { 193 gpioc: gpio@FF142000 {
192 compatible = "abilis,tb10x-gpio"; 194 compatible = "abilis,tb10x-gpio";
@@ -196,9 +198,10 @@
196 interrupts = <27 2>; 198 interrupts = <27 2>;
197 reg = <0xFF142000 0x1000>; 199 reg = <0xFF142000 0x1000>;
198 gpio-controller; 200 gpio-controller;
199 #gpio-cells = <1>; 201 #gpio-cells = <2>;
200 gpio-base = <5>; 202 abilis,ngpio = <3>;
201 gpio-pins = <&pctl_gpio_c>; 203 gpio-ranges = <&iomux 0 0 0>;
204 gpio-ranges-group-names = "gpioc";
202 }; 205 };
203 gpiod: gpio@FF143000 { 206 gpiod: gpio@FF143000 {
204 compatible = "abilis,tb10x-gpio"; 207 compatible = "abilis,tb10x-gpio";
@@ -208,9 +211,10 @@
208 interrupts = <27 2>; 211 interrupts = <27 2>;
209 reg = <0xFF143000 0x1000>; 212 reg = <0xFF143000 0x1000>;
210 gpio-controller; 213 gpio-controller;
211 #gpio-cells = <1>; 214 #gpio-cells = <2>;
212 gpio-base = <8>; 215 abilis,ngpio = <2>;
213 gpio-pins = <&pctl_gpio_d>; 216 gpio-ranges = <&iomux 0 0 0>;
217 gpio-ranges-group-names = "gpiod";
214 }; 218 };
215 gpioe: gpio@FF144000 { 219 gpioe: gpio@FF144000 {
216 compatible = "abilis,tb10x-gpio"; 220 compatible = "abilis,tb10x-gpio";
@@ -220,9 +224,10 @@
220 interrupts = <27 2>; 224 interrupts = <27 2>;
221 reg = <0xFF144000 0x1000>; 225 reg = <0xFF144000 0x1000>;
222 gpio-controller; 226 gpio-controller;
223 #gpio-cells = <1>; 227 #gpio-cells = <2>;
224 gpio-base = <10>; 228 abilis,ngpio = <3>;
225 gpio-pins = <&pctl_gpio_e>; 229 gpio-ranges = <&iomux 0 0 0>;
230 gpio-ranges-group-names = "gpioe";
226 }; 231 };
227 gpiof: gpio@FF145000 { 232 gpiof: gpio@FF145000 {
228 compatible = "abilis,tb10x-gpio"; 233 compatible = "abilis,tb10x-gpio";
@@ -232,9 +237,10 @@
232 interrupts = <27 2>; 237 interrupts = <27 2>;
233 reg = <0xFF145000 0x1000>; 238 reg = <0xFF145000 0x1000>;
234 gpio-controller; 239 gpio-controller;
235 #gpio-cells = <1>; 240 #gpio-cells = <2>;
236 gpio-base = <13>; 241 abilis,ngpio = <2>;
237 gpio-pins = <&pctl_gpio_f>; 242 gpio-ranges = <&iomux 0 0 0>;
243 gpio-ranges-group-names = "gpiof";
238 }; 244 };
239 gpiog: gpio@FF146000 { 245 gpiog: gpio@FF146000 {
240 compatible = "abilis,tb10x-gpio"; 246 compatible = "abilis,tb10x-gpio";
@@ -244,9 +250,10 @@
244 interrupts = <27 2>; 250 interrupts = <27 2>;
245 reg = <0xFF146000 0x1000>; 251 reg = <0xFF146000 0x1000>;
246 gpio-controller; 252 gpio-controller;
247 #gpio-cells = <1>; 253 #gpio-cells = <2>;
248 gpio-base = <15>; 254 abilis,ngpio = <3>;
249 gpio-pins = <&pctl_gpio_g>; 255 gpio-ranges = <&iomux 0 0 0>;
256 gpio-ranges-group-names = "gpiog";
250 }; 257 };
251 gpioh: gpio@FF147000 { 258 gpioh: gpio@FF147000 {
252 compatible = "abilis,tb10x-gpio"; 259 compatible = "abilis,tb10x-gpio";
@@ -256,9 +263,10 @@
256 interrupts = <27 2>; 263 interrupts = <27 2>;
257 reg = <0xFF147000 0x1000>; 264 reg = <0xFF147000 0x1000>;
258 gpio-controller; 265 gpio-controller;
259 #gpio-cells = <1>; 266 #gpio-cells = <2>;
260 gpio-base = <18>; 267 abilis,ngpio = <2>;
261 gpio-pins = <&pctl_gpio_h>; 268 gpio-ranges = <&iomux 0 0 0>;
269 gpio-ranges-group-names = "gpioh";
262 }; 270 };
263 gpioi: gpio@FF148000 { 271 gpioi: gpio@FF148000 {
264 compatible = "abilis,tb10x-gpio"; 272 compatible = "abilis,tb10x-gpio";
@@ -268,9 +276,10 @@
268 interrupts = <27 2>; 276 interrupts = <27 2>;
269 reg = <0xFF148000 0x1000>; 277 reg = <0xFF148000 0x1000>;
270 gpio-controller; 278 gpio-controller;
271 #gpio-cells = <1>; 279 #gpio-cells = <2>;
272 gpio-base = <20>; 280 abilis,ngpio = <12>;
273 gpio-pins = <&pctl_gpio_i>; 281 gpio-ranges = <&iomux 0 0 0>;
282 gpio-ranges-group-names = "gpioi";
274 }; 283 };
275 gpioj: gpio@FF149000 { 284 gpioj: gpio@FF149000 {
276 compatible = "abilis,tb10x-gpio"; 285 compatible = "abilis,tb10x-gpio";
@@ -280,9 +289,10 @@
280 interrupts = <27 2>; 289 interrupts = <27 2>;
281 reg = <0xFF149000 0x1000>; 290 reg = <0xFF149000 0x1000>;
282 gpio-controller; 291 gpio-controller;
283 #gpio-cells = <1>; 292 #gpio-cells = <2>;
284 gpio-base = <32>; 293 abilis,ngpio = <32>;
285 gpio-pins = <&pctl_gpio_j>; 294 gpio-ranges = <&iomux 0 0 0>;
295 gpio-ranges-group-names = "gpioj";
286 }; 296 };
287 gpiok: gpio@FF14a000 { 297 gpiok: gpio@FF14a000 {
288 compatible = "abilis,tb10x-gpio"; 298 compatible = "abilis,tb10x-gpio";
@@ -292,9 +302,10 @@
292 interrupts = <27 2>; 302 interrupts = <27 2>;
293 reg = <0xFF14A000 0x1000>; 303 reg = <0xFF14A000 0x1000>;
294 gpio-controller; 304 gpio-controller;
295 #gpio-cells = <1>; 305 #gpio-cells = <2>;
296 gpio-base = <64>; 306 abilis,ngpio = <22>;
297 gpio-pins = <&pctl_gpio_k>; 307 gpio-ranges = <&iomux 0 0 0>;
308 gpio-ranges-group-names = "gpiok";
298 }; 309 };
299 gpiol: gpio@FF14b000 { 310 gpiol: gpio@FF14b000 {
300 compatible = "abilis,tb10x-gpio"; 311 compatible = "abilis,tb10x-gpio";
@@ -304,9 +315,10 @@
304 interrupts = <27 2>; 315 interrupts = <27 2>;
305 reg = <0xFF14B000 0x1000>; 316 reg = <0xFF14B000 0x1000>;
306 gpio-controller; 317 gpio-controller;
307 #gpio-cells = <1>; 318 #gpio-cells = <2>;
308 gpio-base = <86>; 319 abilis,ngpio = <4>;
309 gpio-pins = <&pctl_gpio_l>; 320 gpio-ranges = <&iomux 0 0 0>;
321 gpio-ranges-group-names = "gpiol";
310 }; 322 };
311 gpiom: gpio@FF14c000 { 323 gpiom: gpio@FF14c000 {
312 compatible = "abilis,tb10x-gpio"; 324 compatible = "abilis,tb10x-gpio";
@@ -316,9 +328,10 @@
316 interrupts = <27 2>; 328 interrupts = <27 2>;
317 reg = <0xFF14C000 0x1000>; 329 reg = <0xFF14C000 0x1000>;
318 gpio-controller; 330 gpio-controller;
319 #gpio-cells = <1>; 331 #gpio-cells = <2>;
320 gpio-base = <90>; 332 abilis,ngpio = <4>;
321 gpio-pins = <&pctl_gpio_m>; 333 gpio-ranges = <&iomux 0 0 0>;
334 gpio-ranges-group-names = "gpiom";
322 }; 335 };
323 gpion: gpio@FF14d000 { 336 gpion: gpio@FF14d000 {
324 compatible = "abilis,tb10x-gpio"; 337 compatible = "abilis,tb10x-gpio";
@@ -328,9 +341,10 @@
328 interrupts = <27 2>; 341 interrupts = <27 2>;
329 reg = <0xFF14D000 0x1000>; 342 reg = <0xFF14D000 0x1000>;
330 gpio-controller; 343 gpio-controller;
331 #gpio-cells = <1>; 344 #gpio-cells = <2>;
332 gpio-base = <94>; 345 abilis,ngpio = <5>;
333 gpio-pins = <&pctl_gpio_n>; 346 gpio-ranges = <&iomux 0 0 0>;
347 gpio-ranges-group-names = "gpion";
334 }; 348 };
335 }; 349 };
336}; 350};
diff --git a/arch/arc/boot/dts/abilis_tb100_dvk.dts b/arch/arc/boot/dts/abilis_tb100_dvk.dts
index ebc313a9f5b2..3dd6ed941464 100644
--- a/arch/arc/boot/dts/abilis_tb100_dvk.dts
+++ b/arch/arc/boot/dts/abilis_tb100_dvk.dts
@@ -64,62 +64,62 @@
64 compatible = "gpio-leds"; 64 compatible = "gpio-leds";
65 power { 65 power {
66 label = "Power"; 66 label = "Power";
67 gpios = <&gpioi 0>; 67 gpios = <&gpioi 0 0>;
68 linux,default-trigger = "default-on"; 68 linux,default-trigger = "default-on";
69 }; 69 };
70 heartbeat { 70 heartbeat {
71 label = "Heartbeat"; 71 label = "Heartbeat";
72 gpios = <&gpioi 1>; 72 gpios = <&gpioi 1 0>;
73 linux,default-trigger = "heartbeat"; 73 linux,default-trigger = "heartbeat";
74 }; 74 };
75 led2 { 75 led2 {
76 label = "LED2"; 76 label = "LED2";
77 gpios = <&gpioi 2>; 77 gpios = <&gpioi 2 0>;
78 default-state = "off"; 78 default-state = "off";
79 }; 79 };
80 led3 { 80 led3 {
81 label = "LED3"; 81 label = "LED3";
82 gpios = <&gpioi 3>; 82 gpios = <&gpioi 3 0>;
83 default-state = "off"; 83 default-state = "off";
84 }; 84 };
85 led4 { 85 led4 {
86 label = "LED4"; 86 label = "LED4";
87 gpios = <&gpioi 4>; 87 gpios = <&gpioi 4 0>;
88 default-state = "off"; 88 default-state = "off";
89 }; 89 };
90 led5 { 90 led5 {
91 label = "LED5"; 91 label = "LED5";
92 gpios = <&gpioi 5>; 92 gpios = <&gpioi 5 0>;
93 default-state = "off"; 93 default-state = "off";
94 }; 94 };
95 led6 { 95 led6 {
96 label = "LED6"; 96 label = "LED6";
97 gpios = <&gpioi 6>; 97 gpios = <&gpioi 6 0>;
98 default-state = "off"; 98 default-state = "off";
99 }; 99 };
100 led7 { 100 led7 {
101 label = "LED7"; 101 label = "LED7";
102 gpios = <&gpioi 7>; 102 gpios = <&gpioi 7 0>;
103 default-state = "off"; 103 default-state = "off";
104 }; 104 };
105 led8 { 105 led8 {
106 label = "LED8"; 106 label = "LED8";
107 gpios = <&gpioi 8>; 107 gpios = <&gpioi 8 0>;
108 default-state = "off"; 108 default-state = "off";
109 }; 109 };
110 led9 { 110 led9 {
111 label = "LED9"; 111 label = "LED9";
112 gpios = <&gpioi 9>; 112 gpios = <&gpioi 9 0>;
113 default-state = "off"; 113 default-state = "off";
114 }; 114 };
115 led10 { 115 led10 {
116 label = "LED10"; 116 label = "LED10";
117 gpios = <&gpioi 10>; 117 gpios = <&gpioi 10 0>;
118 default-state = "off"; 118 default-state = "off";
119 }; 119 };
120 led11 { 120 led11 {
121 label = "LED11"; 121 label = "LED11";
122 gpios = <&gpioi 11>; 122 gpios = <&gpioi 11 0>;
123 default-state = "off"; 123 default-state = "off";
124 }; 124 };
125 }; 125 };
diff --git a/arch/arc/boot/dts/abilis_tb101.dtsi b/arch/arc/boot/dts/abilis_tb101.dtsi
index da8ca7941e67..b0467229a5c4 100644
--- a/arch/arc/boot/dts/abilis_tb101.dtsi
+++ b/arch/arc/boot/dts/abilis_tb101.dtsi
@@ -43,133 +43,133 @@
43 iomux: iomux@FF10601c { 43 iomux: iomux@FF10601c {
44 /* Port 1 */ 44 /* Port 1 */
45 pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */ 45 pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */
46 pingrp = "mis0_pins"; 46 abilis,function = "mis0";
47 }; 47 };
48 pctl_tsin_s1: pctl-tsin-s1 { /* Serial TS-in 1 */ 48 pctl_tsin_s1: pctl-tsin-s1 { /* Serial TS-in 1 */
49 pingrp = "mis1_pins"; 49 abilis,function = "mis1";
50 }; 50 };
51 pctl_gpio_a: pctl-gpio-a { /* GPIO bank A */ 51 pctl_gpio_a: pctl-gpio-a { /* GPIO bank A */
52 pingrp = "gpioa_pins"; 52 abilis,function = "gpioa";
53 }; 53 };
54 pctl_tsin_p1: pctl-tsin-p1 { /* Parallel TS-in 1 */ 54 pctl_tsin_p1: pctl-tsin-p1 { /* Parallel TS-in 1 */
55 pingrp = "mip1_pins"; 55 abilis,function = "mip1";
56 }; 56 };
57 /* Port 2 */ 57 /* Port 2 */
58 pctl_tsin_s2: pctl-tsin-s2 { /* Serial TS-in 2 */ 58 pctl_tsin_s2: pctl-tsin-s2 { /* Serial TS-in 2 */
59 pingrp = "mis2_pins"; 59 abilis,function = "mis2";
60 }; 60 };
61 pctl_tsin_s3: pctl-tsin-s3 { /* Serial TS-in 3 */ 61 pctl_tsin_s3: pctl-tsin-s3 { /* Serial TS-in 3 */
62 pingrp = "mis3_pins"; 62 abilis,function = "mis3";
63 }; 63 };
64 pctl_gpio_c: pctl-gpio-c { /* GPIO bank C */ 64 pctl_gpio_c: pctl-gpio-c { /* GPIO bank C */
65 pingrp = "gpioc_pins"; 65 abilis,function = "gpioc";
66 }; 66 };
67 pctl_tsin_p3: pctl-tsin-p3 { /* Parallel TS-in 3 */ 67 pctl_tsin_p3: pctl-tsin-p3 { /* Parallel TS-in 3 */
68 pingrp = "mip3_pins"; 68 abilis,function = "mip3";
69 }; 69 };
70 /* Port 3 */ 70 /* Port 3 */
71 pctl_tsin_s4: pctl-tsin-s4 { /* Serial TS-in 4 */ 71 pctl_tsin_s4: pctl-tsin-s4 { /* Serial TS-in 4 */
72 pingrp = "mis4_pins"; 72 abilis,function = "mis4";
73 }; 73 };
74 pctl_tsin_s5: pctl-tsin-s5 { /* Serial TS-in 5 */ 74 pctl_tsin_s5: pctl-tsin-s5 { /* Serial TS-in 5 */
75 pingrp = "mis5_pins"; 75 abilis,function = "mis5";
76 }; 76 };
77 pctl_gpio_e: pctl-gpio-e { /* GPIO bank E */ 77 pctl_gpio_e: pctl-gpio-e { /* GPIO bank E */
78 pingrp = "gpioe_pins"; 78 abilis,function = "gpioe";
79 }; 79 };
80 pctl_tsin_p5: pctl-tsin-p5 { /* Parallel TS-in 5 */ 80 pctl_tsin_p5: pctl-tsin-p5 { /* Parallel TS-in 5 */
81 pingrp = "mip5_pins"; 81 abilis,function = "mip5";
82 }; 82 };
83 /* Port 4 */ 83 /* Port 4 */
84 pctl_tsin_s6: pctl-tsin-s6 { /* Serial TS-in 6 */ 84 pctl_tsin_s6: pctl-tsin-s6 { /* Serial TS-in 6 */
85 pingrp = "mis6_pins"; 85 abilis,function = "mis6";
86 }; 86 };
87 pctl_tsin_s7: pctl-tsin-s7 { /* Serial TS-in 7 */ 87 pctl_tsin_s7: pctl-tsin-s7 { /* Serial TS-in 7 */
88 pingrp = "mis7_pins"; 88 abilis,function = "mis7";
89 }; 89 };
90 pctl_gpio_g: pctl-gpio-g { /* GPIO bank G */ 90 pctl_gpio_g: pctl-gpio-g { /* GPIO bank G */
91 pingrp = "gpiog_pins"; 91 abilis,function = "gpiog";
92 }; 92 };
93 pctl_tsin_p7: pctl-tsin-p7 { /* Parallel TS-in 7 */ 93 pctl_tsin_p7: pctl-tsin-p7 { /* Parallel TS-in 7 */
94 pingrp = "mip7_pins"; 94 abilis,function = "mip7";
95 }; 95 };
96 /* Port 5 */ 96 /* Port 5 */
97 pctl_gpio_j: pctl-gpio-j { /* GPIO bank J */ 97 pctl_gpio_j: pctl-gpio-j { /* GPIO bank J */
98 pingrp = "gpioj_pins"; 98 abilis,function = "gpioj";
99 }; 99 };
100 pctl_gpio_k: pctl-gpio-k { /* GPIO bank K */ 100 pctl_gpio_k: pctl-gpio-k { /* GPIO bank K */
101 pingrp = "gpiok_pins"; 101 abilis,function = "gpiok";
102 }; 102 };
103 pctl_ciplus: pctl-ciplus { /* CI+ interface */ 103 pctl_ciplus: pctl-ciplus { /* CI+ interface */
104 pingrp = "ciplus_pins"; 104 abilis,function = "ciplus";
105 }; 105 };
106 pctl_mcard: pctl-mcard { /* M-Card interface */ 106 pctl_mcard: pctl-mcard { /* M-Card interface */
107 pingrp = "mcard_pins"; 107 abilis,function = "mcard";
108 }; 108 };
109 pctl_stc0: pctl-stc0 { /* Smart card I/F 0 */ 109 pctl_stc0: pctl-stc0 { /* Smart card I/F 0 */
110 pingrp = "stc0_pins"; 110 abilis,function = "stc0";
111 }; 111 };
112 pctl_stc1: pctl-stc1 { /* Smart card I/F 1 */ 112 pctl_stc1: pctl-stc1 { /* Smart card I/F 1 */
113 pingrp = "stc1_pins"; 113 abilis,function = "stc1";
114 }; 114 };
115 /* Port 6 */ 115 /* Port 6 */
116 pctl_tsout_p: pctl-tsout-p { /* Parallel TS-out */ 116 pctl_tsout_p: pctl-tsout-p { /* Parallel TS-out */
117 pingrp = "mop_pins"; 117 abilis,function = "mop";
118 }; 118 };
119 pctl_tsout_s0: pctl-tsout-s0 { /* Serial TS-out 0 */ 119 pctl_tsout_s0: pctl-tsout-s0 { /* Serial TS-out 0 */
120 pingrp = "mos0_pins"; 120 abilis,function = "mos0";
121 }; 121 };
122 pctl_tsout_s1: pctl-tsout-s1 { /* Serial TS-out 1 */ 122 pctl_tsout_s1: pctl-tsout-s1 { /* Serial TS-out 1 */
123 pingrp = "mos1_pins"; 123 abilis,function = "mos1";
124 }; 124 };
125 pctl_tsout_s2: pctl-tsout-s2 { /* Serial TS-out 2 */ 125 pctl_tsout_s2: pctl-tsout-s2 { /* Serial TS-out 2 */
126 pingrp = "mos2_pins"; 126 abilis,function = "mos2";
127 }; 127 };
128 pctl_tsout_s3: pctl-tsout-s3 { /* Serial TS-out 3 */ 128 pctl_tsout_s3: pctl-tsout-s3 { /* Serial TS-out 3 */
129 pingrp = "mos3_pins"; 129 abilis,function = "mos3";
130 }; 130 };
131 /* Port 7 */ 131 /* Port 7 */
132 pctl_uart0: pctl-uart0 { /* UART 0 */ 132 pctl_uart0: pctl-uart0 { /* UART 0 */
133 pingrp = "uart0_pins"; 133 abilis,function = "uart0";
134 }; 134 };
135 pctl_uart1: pctl-uart1 { /* UART 1 */ 135 pctl_uart1: pctl-uart1 { /* UART 1 */
136 pingrp = "uart1_pins"; 136 abilis,function = "uart1";
137 }; 137 };
138 pctl_gpio_l: pctl-gpio-l { /* GPIO bank L */ 138 pctl_gpio_l: pctl-gpio-l { /* GPIO bank L */
139 pingrp = "gpiol_pins"; 139 abilis,function = "gpiol";
140 }; 140 };
141 pctl_gpio_m: pctl-gpio-m { /* GPIO bank M */ 141 pctl_gpio_m: pctl-gpio-m { /* GPIO bank M */
142 pingrp = "gpiom_pins"; 142 abilis,function = "gpiom";
143 }; 143 };
144 /* Port 8 */ 144 /* Port 8 */
145 pctl_spi3: pctl-spi3 { 145 pctl_spi3: pctl-spi3 {
146 pingrp = "spi3_pins"; 146 abilis,function = "spi3";
147 }; 147 };
148 pctl_jtag: pctl-jtag { 148 pctl_jtag: pctl-jtag {
149 pingrp = "jtag_pins"; 149 abilis,function = "jtag";
150 }; 150 };
151 /* Port 9 */ 151 /* Port 9 */
152 pctl_spi1: pctl-spi1 { 152 pctl_spi1: pctl-spi1 {
153 pingrp = "spi1_pins"; 153 abilis,function = "spi1";
154 }; 154 };
155 pctl_gpio_n: pctl-gpio-n { 155 pctl_gpio_n: pctl-gpio-n {
156 pingrp = "gpion_pins"; 156 abilis,function = "gpion";
157 }; 157 };
158 /* Unmuxed GPIOs */ 158 /* Unmuxed GPIOs */
159 pctl_gpio_b: pctl-gpio-b { 159 pctl_gpio_b: pctl-gpio-b {
160 pingrp = "gpiob_pins"; 160 abilis,function = "gpiob";
161 }; 161 };
162 pctl_gpio_d: pctl-gpio-d { 162 pctl_gpio_d: pctl-gpio-d {
163 pingrp = "gpiod_pins"; 163 abilis,function = "gpiod";
164 }; 164 };
165 pctl_gpio_f: pctl-gpio-f { 165 pctl_gpio_f: pctl-gpio-f {
166 pingrp = "gpiof_pins"; 166 abilis,function = "gpiof";
167 }; 167 };
168 pctl_gpio_h: pctl-gpio-h { 168 pctl_gpio_h: pctl-gpio-h {
169 pingrp = "gpioh_pins"; 169 abilis,function = "gpioh";
170 }; 170 };
171 pctl_gpio_i: pctl-gpio-i { 171 pctl_gpio_i: pctl-gpio-i {
172 pingrp = "gpioi_pins"; 172 abilis,function = "gpioi";
173 }; 173 };
174 }; 174 };
175 175
@@ -181,9 +181,10 @@
181 interrupts = <27 2>; 181 interrupts = <27 2>;
182 reg = <0xFF140000 0x1000>; 182 reg = <0xFF140000 0x1000>;
183 gpio-controller; 183 gpio-controller;
184 #gpio-cells = <1>; 184 #gpio-cells = <2>;
185 gpio-base = <0>; 185 abilis,ngpio = <3>;
186 gpio-pins = <&pctl_gpio_a>; 186 gpio-ranges = <&iomux 0 0 0>;
187 gpio-ranges-group-names = "gpioa";
187 }; 188 };
188 gpiob: gpio@FF141000 { 189 gpiob: gpio@FF141000 {
189 compatible = "abilis,tb10x-gpio"; 190 compatible = "abilis,tb10x-gpio";
@@ -193,9 +194,10 @@
193 interrupts = <27 2>; 194 interrupts = <27 2>;
194 reg = <0xFF141000 0x1000>; 195 reg = <0xFF141000 0x1000>;
195 gpio-controller; 196 gpio-controller;
196 #gpio-cells = <1>; 197 #gpio-cells = <2>;
197 gpio-base = <3>; 198 abilis,ngpio = <2>;
198 gpio-pins = <&pctl_gpio_b>; 199 gpio-ranges = <&iomux 0 0 0>;
200 gpio-ranges-group-names = "gpiob";
199 }; 201 };
200 gpioc: gpio@FF142000 { 202 gpioc: gpio@FF142000 {
201 compatible = "abilis,tb10x-gpio"; 203 compatible = "abilis,tb10x-gpio";
@@ -205,9 +207,10 @@
205 interrupts = <27 2>; 207 interrupts = <27 2>;
206 reg = <0xFF142000 0x1000>; 208 reg = <0xFF142000 0x1000>;
207 gpio-controller; 209 gpio-controller;
208 #gpio-cells = <1>; 210 #gpio-cells = <2>;
209 gpio-base = <5>; 211 abilis,ngpio = <3>;
210 gpio-pins = <&pctl_gpio_c>; 212 gpio-ranges = <&iomux 0 0 0>;
213 gpio-ranges-group-names = "gpioc";
211 }; 214 };
212 gpiod: gpio@FF143000 { 215 gpiod: gpio@FF143000 {
213 compatible = "abilis,tb10x-gpio"; 216 compatible = "abilis,tb10x-gpio";
@@ -217,9 +220,10 @@
217 interrupts = <27 2>; 220 interrupts = <27 2>;
218 reg = <0xFF143000 0x1000>; 221 reg = <0xFF143000 0x1000>;
219 gpio-controller; 222 gpio-controller;
220 #gpio-cells = <1>; 223 #gpio-cells = <2>;
221 gpio-base = <8>; 224 abilis,ngpio = <2>;
222 gpio-pins = <&pctl_gpio_d>; 225 gpio-ranges = <&iomux 0 0 0>;
226 gpio-ranges-group-names = "gpiod";
223 }; 227 };
224 gpioe: gpio@FF144000 { 228 gpioe: gpio@FF144000 {
225 compatible = "abilis,tb10x-gpio"; 229 compatible = "abilis,tb10x-gpio";
@@ -229,9 +233,10 @@
229 interrupts = <27 2>; 233 interrupts = <27 2>;
230 reg = <0xFF144000 0x1000>; 234 reg = <0xFF144000 0x1000>;
231 gpio-controller; 235 gpio-controller;
232 #gpio-cells = <1>; 236 #gpio-cells = <2>;
233 gpio-base = <10>; 237 abilis,ngpio = <3>;
234 gpio-pins = <&pctl_gpio_e>; 238 gpio-ranges = <&iomux 0 0 0>;
239 gpio-ranges-group-names = "gpioe";
235 }; 240 };
236 gpiof: gpio@FF145000 { 241 gpiof: gpio@FF145000 {
237 compatible = "abilis,tb10x-gpio"; 242 compatible = "abilis,tb10x-gpio";
@@ -241,9 +246,10 @@
241 interrupts = <27 2>; 246 interrupts = <27 2>;
242 reg = <0xFF145000 0x1000>; 247 reg = <0xFF145000 0x1000>;
243 gpio-controller; 248 gpio-controller;
244 #gpio-cells = <1>; 249 #gpio-cells = <2>;
245 gpio-base = <13>; 250 abilis,ngpio = <2>;
246 gpio-pins = <&pctl_gpio_f>; 251 gpio-ranges = <&iomux 0 0 0>;
252 gpio-ranges-group-names = "gpiof";
247 }; 253 };
248 gpiog: gpio@FF146000 { 254 gpiog: gpio@FF146000 {
249 compatible = "abilis,tb10x-gpio"; 255 compatible = "abilis,tb10x-gpio";
@@ -253,9 +259,10 @@
253 interrupts = <27 2>; 259 interrupts = <27 2>;
254 reg = <0xFF146000 0x1000>; 260 reg = <0xFF146000 0x1000>;
255 gpio-controller; 261 gpio-controller;
256 #gpio-cells = <1>; 262 #gpio-cells = <2>;
257 gpio-base = <15>; 263 abilis,ngpio = <3>;
258 gpio-pins = <&pctl_gpio_g>; 264 gpio-ranges = <&iomux 0 0 0>;
265 gpio-ranges-group-names = "gpiog";
259 }; 266 };
260 gpioh: gpio@FF147000 { 267 gpioh: gpio@FF147000 {
261 compatible = "abilis,tb10x-gpio"; 268 compatible = "abilis,tb10x-gpio";
@@ -265,9 +272,10 @@
265 interrupts = <27 2>; 272 interrupts = <27 2>;
266 reg = <0xFF147000 0x1000>; 273 reg = <0xFF147000 0x1000>;
267 gpio-controller; 274 gpio-controller;
268 #gpio-cells = <1>; 275 #gpio-cells = <2>;
269 gpio-base = <18>; 276 abilis,ngpio = <2>;
270 gpio-pins = <&pctl_gpio_h>; 277 gpio-ranges = <&iomux 0 0 0>;
278 gpio-ranges-group-names = "gpioh";
271 }; 279 };
272 gpioi: gpio@FF148000 { 280 gpioi: gpio@FF148000 {
273 compatible = "abilis,tb10x-gpio"; 281 compatible = "abilis,tb10x-gpio";
@@ -277,9 +285,10 @@
277 interrupts = <27 2>; 285 interrupts = <27 2>;
278 reg = <0xFF148000 0x1000>; 286 reg = <0xFF148000 0x1000>;
279 gpio-controller; 287 gpio-controller;
280 #gpio-cells = <1>; 288 #gpio-cells = <2>;
281 gpio-base = <20>; 289 abilis,ngpio = <12>;
282 gpio-pins = <&pctl_gpio_i>; 290 gpio-ranges = <&iomux 0 0 0>;
291 gpio-ranges-group-names = "gpioi";
283 }; 292 };
284 gpioj: gpio@FF149000 { 293 gpioj: gpio@FF149000 {
285 compatible = "abilis,tb10x-gpio"; 294 compatible = "abilis,tb10x-gpio";
@@ -289,9 +298,10 @@
289 interrupts = <27 2>; 298 interrupts = <27 2>;
290 reg = <0xFF149000 0x1000>; 299 reg = <0xFF149000 0x1000>;
291 gpio-controller; 300 gpio-controller;
292 #gpio-cells = <1>; 301 #gpio-cells = <2>;
293 gpio-base = <32>; 302 abilis,ngpio = <32>;
294 gpio-pins = <&pctl_gpio_j>; 303 gpio-ranges = <&iomux 0 0 0>;
304 gpio-ranges-group-names = "gpioj";
295 }; 305 };
296 gpiok: gpio@FF14a000 { 306 gpiok: gpio@FF14a000 {
297 compatible = "abilis,tb10x-gpio"; 307 compatible = "abilis,tb10x-gpio";
@@ -301,9 +311,10 @@
301 interrupts = <27 2>; 311 interrupts = <27 2>;
302 reg = <0xFF14A000 0x1000>; 312 reg = <0xFF14A000 0x1000>;
303 gpio-controller; 313 gpio-controller;
304 #gpio-cells = <1>; 314 #gpio-cells = <2>;
305 gpio-base = <64>; 315 abilis,ngpio = <22>;
306 gpio-pins = <&pctl_gpio_k>; 316 gpio-ranges = <&iomux 0 0 0>;
317 gpio-ranges-group-names = "gpiok";
307 }; 318 };
308 gpiol: gpio@FF14b000 { 319 gpiol: gpio@FF14b000 {
309 compatible = "abilis,tb10x-gpio"; 320 compatible = "abilis,tb10x-gpio";
@@ -313,9 +324,10 @@
313 interrupts = <27 2>; 324 interrupts = <27 2>;
314 reg = <0xFF14B000 0x1000>; 325 reg = <0xFF14B000 0x1000>;
315 gpio-controller; 326 gpio-controller;
316 #gpio-cells = <1>; 327 #gpio-cells = <2>;
317 gpio-base = <86>; 328 abilis,ngpio = <4>;
318 gpio-pins = <&pctl_gpio_l>; 329 gpio-ranges = <&iomux 0 0 0>;
330 gpio-ranges-group-names = "gpiol";
319 }; 331 };
320 gpiom: gpio@FF14c000 { 332 gpiom: gpio@FF14c000 {
321 compatible = "abilis,tb10x-gpio"; 333 compatible = "abilis,tb10x-gpio";
@@ -325,9 +337,10 @@
325 interrupts = <27 2>; 337 interrupts = <27 2>;
326 reg = <0xFF14C000 0x1000>; 338 reg = <0xFF14C000 0x1000>;
327 gpio-controller; 339 gpio-controller;
328 #gpio-cells = <1>; 340 #gpio-cells = <2>;
329 gpio-base = <90>; 341 abilis,ngpio = <4>;
330 gpio-pins = <&pctl_gpio_m>; 342 gpio-ranges = <&iomux 0 0 0>;
343 gpio-ranges-group-names = "gpiom";
331 }; 344 };
332 gpion: gpio@FF14d000 { 345 gpion: gpio@FF14d000 {
333 compatible = "abilis,tb10x-gpio"; 346 compatible = "abilis,tb10x-gpio";
@@ -337,9 +350,10 @@
337 interrupts = <27 2>; 350 interrupts = <27 2>;
338 reg = <0xFF14D000 0x1000>; 351 reg = <0xFF14D000 0x1000>;
339 gpio-controller; 352 gpio-controller;
340 #gpio-cells = <1>; 353 #gpio-cells = <2>;
341 gpio-base = <94>; 354 abilis,ngpio = <5>;
342 gpio-pins = <&pctl_gpio_n>; 355 gpio-ranges = <&iomux 0 0 0>;
356 gpio-ranges-group-names = "gpion";
343 }; 357 };
344 }; 358 };
345}; 359};
diff --git a/arch/arc/boot/dts/abilis_tb101_dvk.dts b/arch/arc/boot/dts/abilis_tb101_dvk.dts
index b204657993aa..1cf51c280f28 100644
--- a/arch/arc/boot/dts/abilis_tb101_dvk.dts
+++ b/arch/arc/boot/dts/abilis_tb101_dvk.dts
@@ -64,62 +64,62 @@
64 compatible = "gpio-leds"; 64 compatible = "gpio-leds";
65 power { 65 power {
66 label = "Power"; 66 label = "Power";
67 gpios = <&gpioi 0>; 67 gpios = <&gpioi 0 0>;
68 linux,default-trigger = "default-on"; 68 linux,default-trigger = "default-on";
69 }; 69 };
70 heartbeat { 70 heartbeat {
71 label = "Heartbeat"; 71 label = "Heartbeat";
72 gpios = <&gpioi 1>; 72 gpios = <&gpioi 1 0>;
73 linux,default-trigger = "heartbeat"; 73 linux,default-trigger = "heartbeat";
74 }; 74 };
75 led2 { 75 led2 {
76 label = "LED2"; 76 label = "LED2";
77 gpios = <&gpioi 2>; 77 gpios = <&gpioi 2 0>;
78 default-state = "off"; 78 default-state = "off";
79 }; 79 };
80 led3 { 80 led3 {
81 label = "LED3"; 81 label = "LED3";
82 gpios = <&gpioi 3>; 82 gpios = <&gpioi 3 0>;
83 default-state = "off"; 83 default-state = "off";
84 }; 84 };
85 led4 { 85 led4 {
86 label = "LED4"; 86 label = "LED4";
87 gpios = <&gpioi 4>; 87 gpios = <&gpioi 4 0>;
88 default-state = "off"; 88 default-state = "off";
89 }; 89 };
90 led5 { 90 led5 {
91 label = "LED5"; 91 label = "LED5";
92 gpios = <&gpioi 5>; 92 gpios = <&gpioi 5 0>;
93 default-state = "off"; 93 default-state = "off";
94 }; 94 };
95 led6 { 95 led6 {
96 label = "LED6"; 96 label = "LED6";
97 gpios = <&gpioi 6>; 97 gpios = <&gpioi 6 0>;
98 default-state = "off"; 98 default-state = "off";
99 }; 99 };
100 led7 { 100 led7 {
101 label = "LED7"; 101 label = "LED7";
102 gpios = <&gpioi 7>; 102 gpios = <&gpioi 7 0>;
103 default-state = "off"; 103 default-state = "off";
104 }; 104 };
105 led8 { 105 led8 {
106 label = "LED8"; 106 label = "LED8";
107 gpios = <&gpioi 8>; 107 gpios = <&gpioi 8 0>;
108 default-state = "off"; 108 default-state = "off";
109 }; 109 };
110 led9 { 110 led9 {
111 label = "LED9"; 111 label = "LED9";
112 gpios = <&gpioi 9>; 112 gpios = <&gpioi 9 0>;
113 default-state = "off"; 113 default-state = "off";
114 }; 114 };
115 led10 { 115 led10 {
116 label = "LED10"; 116 label = "LED10";
117 gpios = <&gpioi 10>; 117 gpios = <&gpioi 10 0>;
118 default-state = "off"; 118 default-state = "off";
119 }; 119 };
120 led11 { 120 led11 {
121 label = "LED11"; 121 label = "LED11";
122 gpios = <&gpioi 11>; 122 gpios = <&gpioi 11 0>;
123 default-state = "off"; 123 default-state = "off";
124 }; 124 };
125 }; 125 };
diff --git a/arch/arc/boot/dts/abilis_tb10x.dtsi b/arch/arc/boot/dts/abilis_tb10x.dtsi
index edf56f4749e1..a098d7c05e96 100644
--- a/arch/arc/boot/dts/abilis_tb10x.dtsi
+++ b/arch/arc/boot/dts/abilis_tb10x.dtsi
@@ -62,9 +62,8 @@
62 }; 62 };
63 63
64 iomux: iomux@FF10601c { 64 iomux: iomux@FF10601c {
65 #address-cells = <1>;
66 #size-cells = <1>;
67 compatible = "abilis,tb10x-iomux"; 65 compatible = "abilis,tb10x-iomux";
66 #gpio-range-cells = <3>;
68 reg = <0xFF10601c 0x4>; 67 reg = <0xFF10601c 0x4>;
69 }; 68 };
70 69
diff --git a/arch/arc/boot/dts/angel4.dts b/arch/arc/boot/dts/angel4.dts
index 4fb2d6f655bd..bcf662d21a57 100644
--- a/arch/arc/boot/dts/angel4.dts
+++ b/arch/arc/boot/dts/angel4.dts
@@ -67,5 +67,9 @@
67 reg = <1>; 67 reg = <1>;
68 }; 68 };
69 }; 69 };
70
71 arcpmu0: pmu {
72 compatible = "snps,arc700-pmu";
73 };
70 }; 74 };
71}; 75};
diff --git a/arch/arc/configs/fpga_noramfs_defconfig b/arch/arc/configs/fpga_noramfs_defconfig
new file mode 100644
index 000000000000..5276a52f6a2f
--- /dev/null
+++ b/arch/arc/configs/fpga_noramfs_defconfig
@@ -0,0 +1,64 @@
1CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
2# CONFIG_LOCALVERSION_AUTO is not set
3CONFIG_DEFAULT_HOSTNAME="ARCLinux"
4# CONFIG_SWAP is not set
5CONFIG_HIGH_RES_TIMERS=y
6CONFIG_IKCONFIG=y
7CONFIG_IKCONFIG_PROC=y
8CONFIG_NAMESPACES=y
9# CONFIG_UTS_NS is not set
10# CONFIG_PID_NS is not set
11CONFIG_BLK_DEV_INITRD=y
12CONFIG_KALLSYMS_ALL=y
13CONFIG_EMBEDDED=y
14# CONFIG_SLUB_DEBUG is not set
15# CONFIG_COMPAT_BRK is not set
16CONFIG_KPROBES=y
17CONFIG_MODULES=y
18# CONFIG_LBDAF is not set
19# CONFIG_BLK_DEV_BSG is not set
20# CONFIG_IOSCHED_DEADLINE is not set
21# CONFIG_IOSCHED_CFQ is not set
22CONFIG_ARC_PLAT_FPGA_LEGACY=y
23CONFIG_ARC_BOARD_ML509=y
24# CONFIG_ARC_HAS_RTSC is not set
25CONFIG_ARC_BUILTIN_DTB_NAME="angel4"
26CONFIG_PREEMPT=y
27# CONFIG_COMPACTION is not set
28# CONFIG_CROSS_MEMORY_ATTACH is not set
29CONFIG_NET=y
30CONFIG_PACKET=y
31CONFIG_UNIX=y
32CONFIG_UNIX_DIAG=y
33CONFIG_NET_KEY=y
34CONFIG_INET=y
35# CONFIG_IPV6 is not set
36# CONFIG_STANDALONE is not set
37# CONFIG_PREVENT_FIRMWARE_BUILD is not set
38# CONFIG_FIRMWARE_IN_KERNEL is not set
39# CONFIG_BLK_DEV is not set
40CONFIG_NETDEVICES=y
41CONFIG_ARC_EMAC=y
42CONFIG_LXT_PHY=y
43# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
44# CONFIG_INPUT_KEYBOARD is not set
45# CONFIG_INPUT_MOUSE is not set
46# CONFIG_SERIO is not set
47# CONFIG_LEGACY_PTYS is not set
48# CONFIG_DEVKMEM is not set
49CONFIG_SERIAL_ARC=y
50CONFIG_SERIAL_ARC_CONSOLE=y
51# CONFIG_HW_RANDOM is not set
52# CONFIG_HWMON is not set
53# CONFIG_VGA_CONSOLE is not set
54# CONFIG_HID is not set
55# CONFIG_USB_SUPPORT is not set
56# CONFIG_IOMMU_SUPPORT is not set
57CONFIG_EXT2_FS=y
58CONFIG_EXT2_FS_XATTR=y
59CONFIG_TMPFS=y
60# CONFIG_MISC_FILESYSTEMS is not set
61CONFIG_NFS_FS=y
62# CONFIG_ENABLE_WARN_DEPRECATED is not set
63# CONFIG_ENABLE_MUST_CHECK is not set
64CONFIG_XZ_DEC=y
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index 115ad96480e6..cbf755e32a03 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com) 2 * Linux performance counter support for ARC
3 *
4 * Copyright (C) 2011-2013 Synopsys, Inc. (www.synopsys.com)
3 * 5 *
4 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
@@ -10,4 +12,204 @@
10#ifndef __ASM_PERF_EVENT_H 12#ifndef __ASM_PERF_EVENT_H
11#define __ASM_PERF_EVENT_H 13#define __ASM_PERF_EVENT_H
12 14
15/* real maximum varies per CPU, this is the maximum supported by the driver */
16#define ARC_PMU_MAX_HWEVENTS 64
17
18#define ARC_REG_CC_BUILD 0xF6
19#define ARC_REG_CC_INDEX 0x240
20#define ARC_REG_CC_NAME0 0x241
21#define ARC_REG_CC_NAME1 0x242
22
23#define ARC_REG_PCT_BUILD 0xF5
24#define ARC_REG_PCT_COUNTL 0x250
25#define ARC_REG_PCT_COUNTH 0x251
26#define ARC_REG_PCT_SNAPL 0x252
27#define ARC_REG_PCT_SNAPH 0x253
28#define ARC_REG_PCT_CONFIG 0x254
29#define ARC_REG_PCT_CONTROL 0x255
30#define ARC_REG_PCT_INDEX 0x256
31
32#define ARC_REG_PCT_CONTROL_CC (1 << 16) /* clear counts */
33#define ARC_REG_PCT_CONTROL_SN (1 << 17) /* snapshot */
34
35struct arc_reg_pct_build {
36#ifdef CONFIG_CPU_BIG_ENDIAN
37 unsigned int m:8, c:8, r:6, s:2, v:8;
38#else
39 unsigned int v:8, s:2, r:6, c:8, m:8;
40#endif
41};
42
43struct arc_reg_cc_build {
44#ifdef CONFIG_CPU_BIG_ENDIAN
45 unsigned int c:16, r:8, v:8;
46#else
47 unsigned int v:8, r:8, c:16;
48#endif
49};
50
51#define PERF_COUNT_ARC_DCLM (PERF_COUNT_HW_MAX + 0)
52#define PERF_COUNT_ARC_DCSM (PERF_COUNT_HW_MAX + 1)
53#define PERF_COUNT_ARC_ICM (PERF_COUNT_HW_MAX + 2)
54#define PERF_COUNT_ARC_BPOK (PERF_COUNT_HW_MAX + 3)
55#define PERF_COUNT_ARC_EDTLB (PERF_COUNT_HW_MAX + 4)
56#define PERF_COUNT_ARC_EITLB (PERF_COUNT_HW_MAX + 5)
57#define PERF_COUNT_ARC_HW_MAX (PERF_COUNT_HW_MAX + 6)
58
59/*
60 * The "generalized" performance events seem to really be a copy
61 * of the available events on x86 processors; the mapping to ARC
62 * events is not always possible 1-to-1. Fortunately, there doesn't
63 * seem to be an exact definition for these events, so we can cheat
64 * a bit where necessary.
65 *
66 * In particular, the following PERF events may behave a bit differently
67 * compared to other architectures:
68 *
69 * PERF_COUNT_HW_CPU_CYCLES
70 * Cycles not in halted state
71 *
72 * PERF_COUNT_HW_REF_CPU_CYCLES
73 * Reference cycles not in halted state, same as PERF_COUNT_HW_CPU_CYCLES
74 * for now as we don't do Dynamic Voltage/Frequency Scaling (yet)
75 *
76 * PERF_COUNT_HW_BUS_CYCLES
77 * Unclear what this means, Intel uses 0x013c, which according to
78 * their datasheet means "unhalted reference cycles". It sounds similar
79 * to PERF_COUNT_HW_REF_CPU_CYCLES, and we use the same counter for it.
80 *
81 * PERF_COUNT_HW_STALLED_CYCLES_BACKEND
82 * PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
83 * The ARC 700 can either measure stalls per pipeline stage, or all stalls
84 * combined; for now we assign all stalls to STALLED_CYCLES_BACKEND
85 * and all pipeline flushes (e.g. caused by mispredicts, etc.) to
86 * STALLED_CYCLES_FRONTEND.
87 *
88 * We could start multiple performance counters and combine everything
89 * afterwards, but that makes it complicated.
90 *
91 * Note that I$ cache misses aren't counted by either of the two!
92 */
93
94static const char * const arc_pmu_ev_hw_map[] = {
95 [PERF_COUNT_HW_CPU_CYCLES] = "crun",
96 [PERF_COUNT_HW_REF_CPU_CYCLES] = "crun",
97 [PERF_COUNT_HW_BUS_CYCLES] = "crun",
98 [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
99 [PERF_COUNT_HW_BRANCH_MISSES] = "bpfail",
100 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp",
101 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush",
102 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall",
103 [PERF_COUNT_ARC_DCLM] = "dclm",
104 [PERF_COUNT_ARC_DCSM] = "dcsm",
105 [PERF_COUNT_ARC_ICM] = "icm",
106 [PERF_COUNT_ARC_BPOK] = "bpok",
107 [PERF_COUNT_ARC_EDTLB] = "edtlb",
108 [PERF_COUNT_ARC_EITLB] = "eitlb",
109};
110
111#define C(_x) PERF_COUNT_HW_CACHE_##_x
112#define CACHE_OP_UNSUPPORTED 0xffff
113
114static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
115 [C(L1D)] = {
116 [C(OP_READ)] = {
117 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
118 [C(RESULT_MISS)] = PERF_COUNT_ARC_DCLM,
119 },
120 [C(OP_WRITE)] = {
121 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
122 [C(RESULT_MISS)] = PERF_COUNT_ARC_DCSM,
123 },
124 [C(OP_PREFETCH)] = {
125 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
126 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
127 },
128 },
129 [C(L1I)] = {
130 [C(OP_READ)] = {
131 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
132 [C(RESULT_MISS)] = PERF_COUNT_ARC_ICM,
133 },
134 [C(OP_WRITE)] = {
135 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
136 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
137 },
138 [C(OP_PREFETCH)] = {
139 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
140 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
141 },
142 },
143 [C(LL)] = {
144 [C(OP_READ)] = {
145 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
146 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
147 },
148 [C(OP_WRITE)] = {
149 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
150 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
151 },
152 [C(OP_PREFETCH)] = {
153 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
154 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
155 },
156 },
157 [C(DTLB)] = {
158 [C(OP_READ)] = {
159 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
160 [C(RESULT_MISS)] = PERF_COUNT_ARC_EDTLB,
161 },
162 [C(OP_WRITE)] = {
163 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
164 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
165 },
166 [C(OP_PREFETCH)] = {
167 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
168 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
169 },
170 },
171 [C(ITLB)] = {
172 [C(OP_READ)] = {
173 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
174 [C(RESULT_MISS)] = PERF_COUNT_ARC_EITLB,
175 },
176 [C(OP_WRITE)] = {
177 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
178 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
179 },
180 [C(OP_PREFETCH)] = {
181 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
182 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
183 },
184 },
185 [C(BPU)] = {
186 [C(OP_READ)] = {
187 [C(RESULT_ACCESS)] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
188 [C(RESULT_MISS)] = PERF_COUNT_HW_BRANCH_MISSES,
189 },
190 [C(OP_WRITE)] = {
191 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
192 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
193 },
194 [C(OP_PREFETCH)] = {
195 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
196 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
197 },
198 },
199 [C(NODE)] = {
200 [C(OP_READ)] = {
201 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
202 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
203 },
204 [C(OP_WRITE)] = {
205 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
206 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
207 },
208 [C(OP_PREFETCH)] = {
209 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
210 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
211 },
212 },
213};
214
13#endif /* __ASM_PERF_EVENT_H */ 215#endif /* __ASM_PERF_EVENT_H */
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 2d50a4cdd7f3..45be21672011 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -80,8 +80,6 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
80 80
81#endif /* !__ASSEMBLY__ */ 81#endif /* !__ASSEMBLY__ */
82 82
83#define PREEMPT_ACTIVE 0x10000000
84
85/* 83/*
86 * thread information flags 84 * thread information flags
87 * - these are process state flags that various assembly files may need to 85 * - these are process state flags that various assembly files may need to
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 6f30484f34b7..68125dd766c6 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -8,6 +8,9 @@
8 8
9/******** no-legacy-syscalls-ABI *******/ 9/******** no-legacy-syscalls-ABI *******/
10 10
11#ifndef _UAPI_ASM_ARC_UNISTD_H
12#define _UAPI_ASM_ARC_UNISTD_H
13
11#define __ARCH_WANT_SYS_EXECVE 14#define __ARCH_WANT_SYS_EXECVE
12#define __ARCH_WANT_SYS_CLONE 15#define __ARCH_WANT_SYS_CLONE
13#define __ARCH_WANT_SYS_VFORK 16#define __ARCH_WANT_SYS_VFORK
@@ -32,3 +35,5 @@ __SYSCALL(__NR_arc_gettls, sys_arc_gettls)
32/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */ 35/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
33#define __NR_sysfs (__NR_arch_specific_syscall + 3) 36#define __NR_sysfs (__NR_arch_specific_syscall + 3)
34__SYSCALL(__NR_sysfs, sys_sysfs) 37__SYSCALL(__NR_sysfs, sys_sysfs)
38
39#endif
diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile
index c242ef07ba70..8004b4fa6461 100644
--- a/arch/arc/kernel/Makefile
+++ b/arch/arc/kernel/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o
19obj-$(CONFIG_ARC_MISALIGN_ACCESS) += unaligned.o 19obj-$(CONFIG_ARC_MISALIGN_ACCESS) += unaligned.o
20obj-$(CONFIG_KGDB) += kgdb.o 20obj-$(CONFIG_KGDB) += kgdb.o
21obj-$(CONFIG_ARC_METAWARE_HLINK) += arc_hostlink.o 21obj-$(CONFIG_ARC_METAWARE_HLINK) += arc_hostlink.o
22obj-$(CONFIG_PERF_EVENTS) += perf_event.o
22 23
23obj-$(CONFIG_ARC_FPU_SAVE_RESTORE) += fpu.o 24obj-$(CONFIG_ARC_FPU_SAVE_RESTORE) += fpu.o
24CFLAGS_fpu.o += -mdpfp 25CFLAGS_fpu.o += -mdpfp
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
new file mode 100644
index 000000000000..63177e4cb66d
--- /dev/null
+++ b/arch/arc/kernel/perf_event.c
@@ -0,0 +1,326 @@
1/*
2 * Linux performance counter support for ARC700 series
3 *
4 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
5 *
6 * This code is inspired by the perf support of various other architectures.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13#include <linux/errno.h>
14#include <linux/module.h>
15#include <linux/of.h>
16#include <linux/perf_event.h>
17#include <linux/platform_device.h>
18#include <asm/arcregs.h>
19
20struct arc_pmu {
21 struct pmu pmu;
22 int counter_size; /* in bits */
23 int n_counters;
24 unsigned long used_mask[BITS_TO_LONGS(ARC_PMU_MAX_HWEVENTS)];
25 int ev_hw_idx[PERF_COUNT_ARC_HW_MAX];
26};
27
28/* read counter #idx; note that counter# != event# on ARC! */
29static uint64_t arc_pmu_read_counter(int idx)
30{
31 uint32_t tmp;
32 uint64_t result;
33
34 /*
35 * ARC supports making 'snapshots' of the counters, so we don't
36 * need to care about counters wrapping to 0 underneath our feet
37 */
38 write_aux_reg(ARC_REG_PCT_INDEX, idx);
39 tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
40 write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
41 result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
42 result |= read_aux_reg(ARC_REG_PCT_SNAPL);
43
44 return result;
45}
46
47static void arc_perf_event_update(struct perf_event *event,
48 struct hw_perf_event *hwc, int idx)
49{
50 struct arc_pmu *arc_pmu = container_of(event->pmu, struct arc_pmu, pmu);
51 uint64_t prev_raw_count, new_raw_count;
52 int64_t delta;
53
54 do {
55 prev_raw_count = local64_read(&hwc->prev_count);
56 new_raw_count = arc_pmu_read_counter(idx);
57 } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
58 new_raw_count) != prev_raw_count);
59
60 delta = (new_raw_count - prev_raw_count) &
61 ((1ULL << arc_pmu->counter_size) - 1ULL);
62
63 local64_add(delta, &event->count);
64 local64_sub(delta, &hwc->period_left);
65}
66
67static void arc_pmu_read(struct perf_event *event)
68{
69 arc_perf_event_update(event, &event->hw, event->hw.idx);
70}
71
72static int arc_pmu_cache_event(u64 config)
73{
74 unsigned int cache_type, cache_op, cache_result;
75 int ret;
76
77 cache_type = (config >> 0) & 0xff;
78 cache_op = (config >> 8) & 0xff;
79 cache_result = (config >> 16) & 0xff;
80 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
81 return -EINVAL;
82 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
83 return -EINVAL;
84 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
85 return -EINVAL;
86
87 ret = arc_pmu_cache_map[cache_type][cache_op][cache_result];
88
89 if (ret == CACHE_OP_UNSUPPORTED)
90 return -ENOENT;
91
92 return ret;
93}
94
95/* initializes hw_perf_event structure if event is supported */
96static int arc_pmu_event_init(struct perf_event *event)
97{
98 struct arc_pmu *arc_pmu = container_of(event->pmu, struct arc_pmu, pmu);
99 struct hw_perf_event *hwc = &event->hw;
100 int ret;
101
102 /* ARC 700 PMU does not support sampling events */
103 if (is_sampling_event(event))
104 return -ENOENT;
105
106 switch (event->attr.type) {
107 case PERF_TYPE_HARDWARE:
108 if (event->attr.config >= PERF_COUNT_HW_MAX)
109 return -ENOENT;
110 if (arc_pmu->ev_hw_idx[event->attr.config] < 0)
111 return -ENOENT;
112 hwc->config = arc_pmu->ev_hw_idx[event->attr.config];
113 pr_debug("initializing event %d with cfg %d\n",
114 (int) event->attr.config, (int) hwc->config);
115 return 0;
116 case PERF_TYPE_HW_CACHE:
117 ret = arc_pmu_cache_event(event->attr.config);
118 if (ret < 0)
119 return ret;
120 hwc->config = arc_pmu->ev_hw_idx[ret];
121 return 0;
122 default:
123 return -ENOENT;
124 }
125}
126
127/* starts all counters */
128static void arc_pmu_enable(struct pmu *pmu)
129{
130 uint32_t tmp;
131 tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
132 write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
133}
134
135/* stops all counters */
136static void arc_pmu_disable(struct pmu *pmu)
137{
138 uint32_t tmp;
139 tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
140 write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
141}
142
143/*
144 * Assigns hardware counter to hardware condition.
145 * Note that there is no separate start/stop mechanism;
146 * stopping is achieved by assigning the 'never' condition
147 */
148static void arc_pmu_start(struct perf_event *event, int flags)
149{
150 struct hw_perf_event *hwc = &event->hw;
151 int idx = hwc->idx;
152
153 if (WARN_ON_ONCE(idx == -1))
154 return;
155
156 if (flags & PERF_EF_RELOAD)
157 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
158
159 event->hw.state = 0;
160
161 /* enable ARC pmu here */
162 write_aux_reg(ARC_REG_PCT_INDEX, idx);
163 write_aux_reg(ARC_REG_PCT_CONFIG, hwc->config);
164}
165
166static void arc_pmu_stop(struct perf_event *event, int flags)
167{
168 struct hw_perf_event *hwc = &event->hw;
169 int idx = hwc->idx;
170
171 if (!(event->hw.state & PERF_HES_STOPPED)) {
172 /* stop ARC pmu here */
173 write_aux_reg(ARC_REG_PCT_INDEX, idx);
174
175 /* condition code #0 is always "never" */
176 write_aux_reg(ARC_REG_PCT_CONFIG, 0);
177
178 event->hw.state |= PERF_HES_STOPPED;
179 }
180
181 if ((flags & PERF_EF_UPDATE) &&
182 !(event->hw.state & PERF_HES_UPTODATE)) {
183 arc_perf_event_update(event, &event->hw, idx);
184 event->hw.state |= PERF_HES_UPTODATE;
185 }
186}
187
188static void arc_pmu_del(struct perf_event *event, int flags)
189{
190 struct arc_pmu *arc_pmu = container_of(event->pmu, struct arc_pmu, pmu);
191
192 arc_pmu_stop(event, PERF_EF_UPDATE);
193 __clear_bit(event->hw.idx, arc_pmu->used_mask);
194
195 perf_event_update_userpage(event);
196}
197
198/* allocate hardware counter and optionally start counting */
199static int arc_pmu_add(struct perf_event *event, int flags)
200{
201 struct arc_pmu *arc_pmu = container_of(event->pmu, struct arc_pmu, pmu);
202 struct hw_perf_event *hwc = &event->hw;
203 int idx = hwc->idx;
204
205 if (__test_and_set_bit(idx, arc_pmu->used_mask)) {
206 idx = find_first_zero_bit(arc_pmu->used_mask,
207 arc_pmu->n_counters);
208 if (idx == arc_pmu->n_counters)
209 return -EAGAIN;
210
211 __set_bit(idx, arc_pmu->used_mask);
212 hwc->idx = idx;
213 }
214
215 write_aux_reg(ARC_REG_PCT_INDEX, idx);
216 write_aux_reg(ARC_REG_PCT_CONFIG, 0);
217 write_aux_reg(ARC_REG_PCT_COUNTL, 0);
218 write_aux_reg(ARC_REG_PCT_COUNTH, 0);
219 local64_set(&hwc->prev_count, 0);
220
221 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
222 if (flags & PERF_EF_START)
223 arc_pmu_start(event, PERF_EF_RELOAD);
224
225 perf_event_update_userpage(event);
226
227 return 0;
228}
229
230static int arc_pmu_device_probe(struct platform_device *pdev)
231{
232 struct arc_pmu *arc_pmu;
233 struct arc_reg_pct_build pct_bcr;
234 struct arc_reg_cc_build cc_bcr;
235 int i, j, ret;
236
237 union cc_name {
238 struct {
239 uint32_t word0, word1;
240 char sentinel;
241 } indiv;
242 char str[9];
243 } cc_name;
244
245
246 READ_BCR(ARC_REG_PCT_BUILD, pct_bcr);
247 if (!pct_bcr.v) {
248 pr_err("This core does not have performance counters!\n");
249 return -ENODEV;
250 }
251
252 arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu),
253 GFP_KERNEL);
254 if (!arc_pmu)
255 return -ENOMEM;
256
257 arc_pmu->n_counters = pct_bcr.c;
258 BUG_ON(arc_pmu->n_counters > ARC_PMU_MAX_HWEVENTS);
259
260 arc_pmu->counter_size = 32 + (pct_bcr.s << 4);
261 pr_info("ARC PMU found with %d counters of size %d bits\n",
262 arc_pmu->n_counters, arc_pmu->counter_size);
263
264 READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
265
266 if (!cc_bcr.v)
267 pr_err("Strange! Performance counters exist, but no countable conditions?\n");
268
269 pr_info("ARC PMU has %d countable conditions\n", cc_bcr.c);
270
271 cc_name.str[8] = 0;
272 for (i = 0; i < PERF_COUNT_HW_MAX; i++)
273 arc_pmu->ev_hw_idx[i] = -1;
274
275 for (j = 0; j < cc_bcr.c; j++) {
276 write_aux_reg(ARC_REG_CC_INDEX, j);
277 cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
278 cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
279 for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
280 if (arc_pmu_ev_hw_map[i] &&
281 !strcmp(arc_pmu_ev_hw_map[i], cc_name.str) &&
282 strlen(arc_pmu_ev_hw_map[i])) {
283 pr_debug("mapping %d to idx %d with name %s\n",
284 i, j, cc_name.str);
285 arc_pmu->ev_hw_idx[i] = j;
286 }
287 }
288 }
289
290 arc_pmu->pmu = (struct pmu) {
291 .pmu_enable = arc_pmu_enable,
292 .pmu_disable = arc_pmu_disable,
293 .event_init = arc_pmu_event_init,
294 .add = arc_pmu_add,
295 .del = arc_pmu_del,
296 .start = arc_pmu_start,
297 .stop = arc_pmu_stop,
298 .read = arc_pmu_read,
299 };
300
301 ret = perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);
302
303 return ret;
304}
305
306#ifdef CONFIG_OF
307static const struct of_device_id arc_pmu_match[] = {
308 { .compatible = "snps,arc700-pmu" },
309 {},
310};
311MODULE_DEVICE_TABLE(of, arc_pmu_match);
312#endif
313
314static struct platform_driver arc_pmu_driver = {
315 .driver = {
316 .name = "arc700-pmu",
317 .of_match_table = of_match_ptr(arc_pmu_match),
318 },
319 .probe = arc_pmu_device_probe,
320};
321
322module_platform_driver(arc_pmu_driver);
323
324MODULE_LICENSE("GPL");
325MODULE_AUTHOR("Mischa Jonker <mjonker@synopsys.com>");
326MODULE_DESCRIPTION("ARC PMU driver");
diff --git a/arch/arc/plat-tb10x/Kconfig b/arch/arc/plat-tb10x/Kconfig
index 1ab386bb5da8..6994c188dc88 100644
--- a/arch/arc/plat-tb10x/Kconfig
+++ b/arch/arc/plat-tb10x/Kconfig
@@ -20,8 +20,10 @@ menuconfig ARC_PLAT_TB10X
20 bool "Abilis TB10x" 20 bool "Abilis TB10x"
21 select COMMON_CLK 21 select COMMON_CLK
22 select PINCTRL 22 select PINCTRL
23 select PINCTRL_TB10X
23 select PINMUX 24 select PINMUX
24 select ARCH_REQUIRE_GPIOLIB 25 select ARCH_REQUIRE_GPIOLIB
26 select GPIO_TB10X
25 select TB10X_IRQC 27 select TB10X_IRQC
26 help 28 help
27 Support for platforms based on the TB10x home media gateway SOC by 29 Support for platforms based on the TB10x home media gateway SOC by
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 214b698cefea..c1f1a7eee953 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -25,7 +25,7 @@ config ARM
25 select HARDIRQS_SW_RESEND 25 select HARDIRQS_SW_RESEND
26 select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL 26 select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
27 select HAVE_ARCH_KGDB 27 select HAVE_ARCH_KGDB
28 select HAVE_ARCH_SECCOMP_FILTER 28 select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
29 select HAVE_ARCH_TRACEHOOK 29 select HAVE_ARCH_TRACEHOOK
30 select HAVE_BPF_JIT 30 select HAVE_BPF_JIT
31 select HAVE_CONTEXT_TRACKING 31 select HAVE_CONTEXT_TRACKING
@@ -1496,6 +1496,7 @@ config HAVE_ARM_ARCH_TIMER
1496 bool "Architected timer support" 1496 bool "Architected timer support"
1497 depends on CPU_V7 1497 depends on CPU_V7
1498 select ARM_ARCH_TIMER 1498 select ARM_ARCH_TIMER
1499 select GENERIC_CLOCKEVENTS
1499 help 1500 help
1500 This option enables support for the ARM architected timer 1501 This option enables support for the ARM architected timer
1501 1502
@@ -1719,7 +1720,6 @@ config AEABI
1719config OABI_COMPAT 1720config OABI_COMPAT
1720 bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)" 1721 bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)"
1721 depends on AEABI && !THUMB2_KERNEL 1722 depends on AEABI && !THUMB2_KERNEL
1722 default y
1723 help 1723 help
1724 This option preserves the old syscall interface along with the 1724 This option preserves the old syscall interface along with the
1725 new (ARM EABI) one. It also provides a compatibility layer to 1725 new (ARM EABI) one. It also provides a compatibility layer to
@@ -1727,11 +1727,16 @@ config OABI_COMPAT
1727 in memory differs between the legacy ABI and the new ARM EABI 1727 in memory differs between the legacy ABI and the new ARM EABI
1728 (only for non "thumb" binaries). This option adds a tiny 1728 (only for non "thumb" binaries). This option adds a tiny
1729 overhead to all syscalls and produces a slightly larger kernel. 1729 overhead to all syscalls and produces a slightly larger kernel.
1730
1731 The seccomp filter system will not be available when this is
1732 selected, since there is no way yet to sensibly distinguish
1733 between calling conventions during filtering.
1734
1730 If you know you'll be using only pure EABI user space then you 1735 If you know you'll be using only pure EABI user space then you
1731 can say N here. If this option is not selected and you attempt 1736 can say N here. If this option is not selected and you attempt
1732 to execute a legacy ABI binary then the result will be 1737 to execute a legacy ABI binary then the result will be
1733 UNPREDICTABLE (in fact it can be predicted that it won't work 1738 UNPREDICTABLE (in fact it can be predicted that it won't work
1734 at all). If in doubt say Y. 1739 at all). If in doubt say N.
1735 1740
1736config ARCH_HAS_HOLES_MEMORYMODEL 1741config ARCH_HAS_HOLES_MEMORYMODEL
1737 bool 1742 bool
diff --git a/arch/arm/boot/dts/am335x-base0033.dts b/arch/arm/boot/dts/am335x-base0033.dts
index b4f95c2bbf74..72a9b3fc4251 100644
--- a/arch/arm/boot/dts/am335x-base0033.dts
+++ b/arch/arm/boot/dts/am335x-base0033.dts
@@ -13,4 +13,83 @@
13/ { 13/ {
14 model = "IGEP COM AM335x on AQUILA Expansion"; 14 model = "IGEP COM AM335x on AQUILA Expansion";
15 compatible = "isee,am335x-base0033", "isee,am335x-igep0033", "ti,am33xx"; 15 compatible = "isee,am335x-base0033", "isee,am335x-igep0033", "ti,am33xx";
16
17 hdmi {
18 compatible = "ti,tilcdc,slave";
19 i2c = <&i2c0>;
20 pinctrl-names = "default", "off";
21 pinctrl-0 = <&nxp_hdmi_pins>;
22 pinctrl-1 = <&nxp_hdmi_off_pins>;
23 status = "okay";
24 };
25
26 leds_base {
27 pinctrl-names = "default";
28 pinctrl-0 = <&leds_base_pins>;
29
30 compatible = "gpio-leds";
31
32 led@0 {
33 label = "base:red:user";
34 gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>; /* gpio1_21 */
35 default-state = "off";
36 };
37
38 led@1 {
39 label = "base:green:user";
40 gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>; /* gpio2_0 */
41 default-state = "off";
42 };
43 };
44};
45
46&am33xx_pinmux {
47 nxp_hdmi_pins: pinmux_nxp_hdmi_pins {
48 pinctrl-single,pins = <
49 0x1b0 (PIN_OUTPUT | MUX_MODE3) /* xdma_event_intr0.clkout1 */
50 0xa0 (PIN_OUTPUT | MUX_MODE0) /* lcd_data0 */
51 0xa4 (PIN_OUTPUT | MUX_MODE0) /* lcd_data1 */
52 0xa8 (PIN_OUTPUT | MUX_MODE0) /* lcd_data2 */
53 0xac (PIN_OUTPUT | MUX_MODE0) /* lcd_data3 */
54 0xb0 (PIN_OUTPUT | MUX_MODE0) /* lcd_data4 */
55 0xb4 (PIN_OUTPUT | MUX_MODE0) /* lcd_data5 */
56 0xb8 (PIN_OUTPUT | MUX_MODE0) /* lcd_data6 */
57 0xbc (PIN_OUTPUT | MUX_MODE0) /* lcd_data7 */
58 0xc0 (PIN_OUTPUT | MUX_MODE0) /* lcd_data8 */
59 0xc4 (PIN_OUTPUT | MUX_MODE0) /* lcd_data9 */
60 0xc8 (PIN_OUTPUT | MUX_MODE0) /* lcd_data10 */
61 0xcc (PIN_OUTPUT | MUX_MODE0) /* lcd_data11 */
62 0xd0 (PIN_OUTPUT | MUX_MODE0) /* lcd_data12 */
63 0xd4 (PIN_OUTPUT | MUX_MODE0) /* lcd_data13 */
64 0xd8 (PIN_OUTPUT | MUX_MODE0) /* lcd_data14 */
65 0xdc (PIN_OUTPUT | MUX_MODE0) /* lcd_data15 */
66 0xe0 (PIN_OUTPUT | MUX_MODE0) /* lcd_vsync */
67 0xe4 (PIN_OUTPUT | MUX_MODE0) /* lcd_hsync */
68 0xe8 (PIN_OUTPUT | MUX_MODE0) /* lcd_pclk */
69 0xec (PIN_OUTPUT | MUX_MODE0) /* lcd_ac_bias_en */
70 >;
71 };
72 nxp_hdmi_off_pins: pinmux_nxp_hdmi_off_pins {
73 pinctrl-single,pins = <
74 0x1b0 (PIN_OUTPUT | MUX_MODE3) /* xdma_event_intr0.clkout1 */
75 >;
76 };
77
78 leds_base_pins: pinmux_leds_base_pins {
79 pinctrl-single,pins = <
80 0x54 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a5.gpio1_21 */
81 0x88 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_csn3.gpio2_0 */
82 >;
83 };
84};
85
86&lcdc {
87 status = "okay";
88};
89
90&i2c0 {
91 eeprom: eeprom@50 {
92 compatible = "at,24c256";
93 reg = <0x50>;
94 };
16}; 95};
diff --git a/arch/arm/boot/dts/am335x-igep0033.dtsi b/arch/arm/boot/dts/am335x-igep0033.dtsi
index 619624479311..7063311a58d9 100644
--- a/arch/arm/boot/dts/am335x-igep0033.dtsi
+++ b/arch/arm/boot/dts/am335x-igep0033.dtsi
@@ -199,6 +199,35 @@
199 pinctrl-0 = <&uart0_pins>; 199 pinctrl-0 = <&uart0_pins>;
200}; 200};
201 201
202&usb {
203 status = "okay";
204
205 control@44e10000 {
206 status = "okay";
207 };
208
209 usb-phy@47401300 {
210 status = "okay";
211 };
212
213 usb-phy@47401b00 {
214 status = "okay";
215 };
216
217 usb@47401000 {
218 status = "okay";
219 };
220
221 usb@47401800 {
222 status = "okay";
223 dr_mode = "host";
224 };
225
226 dma-controller@07402000 {
227 status = "okay";
228 };
229};
230
202#include "tps65910.dtsi" 231#include "tps65910.dtsi"
203 232
204&tps { 233&tps {
diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts
index e99dfaf70052..03fcbf0a88a8 100644
--- a/arch/arm/boot/dts/am3517-evm.dts
+++ b/arch/arm/boot/dts/am3517-evm.dts
@@ -7,11 +7,11 @@
7 */ 7 */
8/dts-v1/; 8/dts-v1/;
9 9
10#include "omap34xx.dtsi" 10#include "am3517.dtsi"
11 11
12/ { 12/ {
13 model = "TI AM3517 EVM (AM3517/05)"; 13 model = "TI AM3517 EVM (AM3517/05 TMDSEVM3517)";
14 compatible = "ti,am3517-evm", "ti,omap3"; 14 compatible = "ti,am3517-evm", "ti,am3517", "ti,omap3";
15 15
16 memory { 16 memory {
17 device_type = "memory"; 17 device_type = "memory";
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
new file mode 100644
index 000000000000..2fbe02faa8b1
--- /dev/null
+++ b/arch/arm/boot/dts/am3517.dtsi
@@ -0,0 +1,63 @@
1/*
2 * Device Tree Source for am3517 SoC
3 *
4 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
5 *
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
9 */
10
11#include "omap3.dtsi"
12
13/ {
14 aliases {
15 serial3 = &uart4;
16 };
17
18 ocp {
19 am35x_otg_hs: am35x_otg_hs@5c040000 {
20 compatible = "ti,omap3-musb";
21 ti,hwmods = "am35x_otg_hs";
22 status = "disabled";
23 reg = <0x5c040000 0x1000>;
24 interrupts = <71>;
25 interrupt-names = "mc";
26 };
27
28 davinci_emac: ethernet@0x5c000000 {
29 compatible = "ti,am3517-emac";
30 ti,hwmods = "davinci_emac";
31 status = "disabled";
32 reg = <0x5c000000 0x30000>;
33 interrupts = <67 68 69 70>;
34 ti,davinci-ctrl-reg-offset = <0x10000>;
35 ti,davinci-ctrl-mod-reg-offset = <0>;
36 ti,davinci-ctrl-ram-offset = <0x20000>;
37 ti,davinci-ctrl-ram-size = <0x2000>;
38 ti,davinci-rmii-en = /bits/ 8 <1>;
39 local-mac-address = [ 00 00 00 00 00 00 ];
40 };
41
42 davinci_mdio: ethernet@0x5c030000 {
43 compatible = "ti,davinci_mdio";
44 ti,hwmods = "davinci_mdio";
45 status = "disabled";
46 reg = <0x5c030000 0x1000>;
47 bus_freq = <1000000>;
48 #address-cells = <1>;
49 #size-cells = <0>;
50 };
51
52 uart4: serial@4809e000 {
53 compatible = "ti,omap3-uart";
54 ti,hwmods = "uart4";
55 status = "disabled";
56 reg = <0x4809e000 0x400>;
57 interrupts = <84>;
58 dmas = <&sdma 55 &sdma 54>;
59 dma-names = "tx", "rx";
60 clock-frequency = <48000000>;
61 };
62 };
63};
diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts
index 90ce29dbe119..08a56bcfc724 100644
--- a/arch/arm/boot/dts/armada-370-db.dts
+++ b/arch/arm/boot/dts/armada-370-db.dts
@@ -99,22 +99,22 @@
99 spi-max-frequency = <50000000>; 99 spi-max-frequency = <50000000>;
100 }; 100 };
101 }; 101 };
102 };
102 103
103 pcie-controller { 104 pcie-controller {
105 status = "okay";
106 /*
107 * The two PCIe units are accessible through
108 * both standard PCIe slots and mini-PCIe
109 * slots on the board.
110 */
111 pcie@1,0 {
112 /* Port 0, Lane 0 */
113 status = "okay";
114 };
115 pcie@2,0 {
116 /* Port 1, Lane 0 */
104 status = "okay"; 117 status = "okay";
105 /*
106 * The two PCIe units are accessible through
107 * both standard PCIe slots and mini-PCIe
108 * slots on the board.
109 */
110 pcie@1,0 {
111 /* Port 0, Lane 0 */
112 status = "okay";
113 };
114 pcie@2,0 {
115 /* Port 1, Lane 0 */
116 status = "okay";
117 };
118 }; 118 };
119 }; 119 };
120 }; 120 };
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
index 00d6a798c705..7f10f627ae5b 100644
--- a/arch/arm/boot/dts/armada-370-xp.dtsi
+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
@@ -118,7 +118,7 @@
118 118
119 coherency-fabric@20200 { 119 coherency-fabric@20200 {
120 compatible = "marvell,coherency-fabric"; 120 compatible = "marvell,coherency-fabric";
121 reg = <0x20200 0xb0>, <0x21810 0x1c>; 121 reg = <0x20200 0xb0>, <0x21010 0x1c>;
122 }; 122 };
123 123
124 serial@12000 { 124 serial@12000 {
diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
index 3f5e6121c730..98335fb34b7a 100644
--- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
@@ -47,7 +47,7 @@
47 /* 47 /*
48 * MV78230 has 2 PCIe units Gen2.0: One unit can be 48 * MV78230 has 2 PCIe units Gen2.0: One unit can be
49 * configured as x4 or quad x1 lanes. One unit is 49 * configured as x4 or quad x1 lanes. One unit is
50 * x4/x1. 50 * x1 only.
51 */ 51 */
52 pcie-controller { 52 pcie-controller {
53 compatible = "marvell,armada-xp-pcie"; 53 compatible = "marvell,armada-xp-pcie";
@@ -62,10 +62,10 @@
62 62
63 ranges = 63 ranges =
64 <0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000 /* Port 0.0 registers */ 64 <0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000 /* Port 0.0 registers */
65 0x82000000 0 0x42000 MBUS_ID(0xf0, 0x01) 0x42000 0 0x00002000 /* Port 2.0 registers */
66 0x82000000 0 0x44000 MBUS_ID(0xf0, 0x01) 0x44000 0 0x00002000 /* Port 0.1 registers */ 65 0x82000000 0 0x44000 MBUS_ID(0xf0, 0x01) 0x44000 0 0x00002000 /* Port 0.1 registers */
67 0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000 /* Port 0.2 registers */ 66 0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000 /* Port 0.2 registers */
68 0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000 /* Port 0.3 registers */ 67 0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000 /* Port 0.3 registers */
68 0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000 /* Port 1.0 registers */
69 0x82000000 0x1 0 MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */ 69 0x82000000 0x1 0 MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */
70 0x81000000 0x1 0 MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO */ 70 0x81000000 0x1 0 MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO */
71 0x82000000 0x2 0 MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */ 71 0x82000000 0x2 0 MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */
@@ -74,8 +74,8 @@
74 0x81000000 0x3 0 MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 0.2 IO */ 74 0x81000000 0x3 0 MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 0.2 IO */
75 0x82000000 0x4 0 MBUS_ID(0x04, 0x78) 0 1 0 /* Port 0.3 MEM */ 75 0x82000000 0x4 0 MBUS_ID(0x04, 0x78) 0 1 0 /* Port 0.3 MEM */
76 0x81000000 0x4 0 MBUS_ID(0x04, 0x70) 0 1 0 /* Port 0.3 IO */ 76 0x81000000 0x4 0 MBUS_ID(0x04, 0x70) 0 1 0 /* Port 0.3 IO */
77 0x82000000 0x9 0 MBUS_ID(0x04, 0xf8) 0 1 0 /* Port 2.0 MEM */ 77 0x82000000 0x5 0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */
78 0x81000000 0x9 0 MBUS_ID(0x04, 0xf0) 0 1 0 /* Port 2.0 IO */>; 78 0x81000000 0x5 0 MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO */>;
79 79
80 pcie@1,0 { 80 pcie@1,0 {
81 device_type = "pci"; 81 device_type = "pci";
@@ -145,20 +145,20 @@
145 status = "disabled"; 145 status = "disabled";
146 }; 146 };
147 147
148 pcie@9,0 { 148 pcie@5,0 {
149 device_type = "pci"; 149 device_type = "pci";
150 assigned-addresses = <0x82000800 0 0x42000 0 0x2000>; 150 assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
151 reg = <0x4800 0 0 0 0>; 151 reg = <0x2800 0 0 0 0>;
152 #address-cells = <3>; 152 #address-cells = <3>;
153 #size-cells = <2>; 153 #size-cells = <2>;
154 #interrupt-cells = <1>; 154 #interrupt-cells = <1>;
155 ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0 155 ranges = <0x82000000 0 0 0x82000000 0x5 0 1 0
156 0x81000000 0 0 0x81000000 0x9 0 1 0>; 156 0x81000000 0 0 0x81000000 0x5 0 1 0>;
157 interrupt-map-mask = <0 0 0 0>; 157 interrupt-map-mask = <0 0 0 0>;
158 interrupt-map = <0 0 0 0 &mpic 99>; 158 interrupt-map = <0 0 0 0 &mpic 62>;
159 marvell,pcie-port = <2>; 159 marvell,pcie-port = <1>;
160 marvell,pcie-lane = <0>; 160 marvell,pcie-lane = <0>;
161 clocks = <&gateclk 26>; 161 clocks = <&gateclk 9>;
162 status = "disabled"; 162 status = "disabled";
163 }; 163 };
164 }; 164 };
diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
index 3e9fd1353f89..66609684d41b 100644
--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
@@ -48,7 +48,7 @@
48 /* 48 /*
49 * MV78260 has 3 PCIe units Gen2.0: Two units can be 49 * MV78260 has 3 PCIe units Gen2.0: Two units can be
50 * configured as x4 or quad x1 lanes. One unit is 50 * configured as x4 or quad x1 lanes. One unit is
51 * x4/x1. 51 * x4 only.
52 */ 52 */
53 pcie-controller { 53 pcie-controller {
54 compatible = "marvell,armada-xp-pcie"; 54 compatible = "marvell,armada-xp-pcie";
@@ -68,7 +68,9 @@
68 0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000 /* Port 0.2 registers */ 68 0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000 /* Port 0.2 registers */
69 0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000 /* Port 0.3 registers */ 69 0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000 /* Port 0.3 registers */
70 0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000 /* Port 1.0 registers */ 70 0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000 /* Port 1.0 registers */
71 0x82000000 0 0x82000 MBUS_ID(0xf0, 0x01) 0x82000 0 0x00002000 /* Port 3.0 registers */ 71 0x82000000 0 0x84000 MBUS_ID(0xf0, 0x01) 0x84000 0 0x00002000 /* Port 1.1 registers */
72 0x82000000 0 0x88000 MBUS_ID(0xf0, 0x01) 0x88000 0 0x00002000 /* Port 1.2 registers */
73 0x82000000 0 0x8c000 MBUS_ID(0xf0, 0x01) 0x8c000 0 0x00002000 /* Port 1.3 registers */
72 0x82000000 0x1 0 MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */ 74 0x82000000 0x1 0 MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */
73 0x81000000 0x1 0 MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO */ 75 0x81000000 0x1 0 MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO */
74 0x82000000 0x2 0 MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */ 76 0x82000000 0x2 0 MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */
@@ -77,10 +79,18 @@
77 0x81000000 0x3 0 MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 0.2 IO */ 79 0x81000000 0x3 0 MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 0.2 IO */
78 0x82000000 0x4 0 MBUS_ID(0x04, 0x78) 0 1 0 /* Port 0.3 MEM */ 80 0x82000000 0x4 0 MBUS_ID(0x04, 0x78) 0 1 0 /* Port 0.3 MEM */
79 0x81000000 0x4 0 MBUS_ID(0x04, 0x70) 0 1 0 /* Port 0.3 IO */ 81 0x81000000 0x4 0 MBUS_ID(0x04, 0x70) 0 1 0 /* Port 0.3 IO */
80 0x82000000 0x9 0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */ 82
81 0x81000000 0x9 0 MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO */ 83 0x82000000 0x5 0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */
82 0x82000000 0xa 0 MBUS_ID(0x08, 0xf8) 0 1 0 /* Port 3.0 MEM */ 84 0x81000000 0x5 0 MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO */
83 0x81000000 0xa 0 MBUS_ID(0x08, 0xf0) 0 1 0 /* Port 3.0 IO */>; 85 0x82000000 0x6 0 MBUS_ID(0x08, 0xd8) 0 1 0 /* Port 1.1 MEM */
86 0x81000000 0x6 0 MBUS_ID(0x08, 0xd0) 0 1 0 /* Port 1.1 IO */
87 0x82000000 0x7 0 MBUS_ID(0x08, 0xb8) 0 1 0 /* Port 1.2 MEM */
88 0x81000000 0x7 0 MBUS_ID(0x08, 0xb0) 0 1 0 /* Port 1.2 IO */
89 0x82000000 0x8 0 MBUS_ID(0x08, 0x78) 0 1 0 /* Port 1.3 MEM */
90 0x81000000 0x8 0 MBUS_ID(0x08, 0x70) 0 1 0 /* Port 1.3 IO */
91
92 0x82000000 0x9 0 MBUS_ID(0x04, 0xf8) 0 1 0 /* Port 2.0 MEM */
93 0x81000000 0x9 0 MBUS_ID(0x04, 0xf0) 0 1 0 /* Port 2.0 IO */>;
84 94
85 pcie@1,0 { 95 pcie@1,0 {
86 device_type = "pci"; 96 device_type = "pci";
@@ -106,8 +116,8 @@
106 #address-cells = <3>; 116 #address-cells = <3>;
107 #size-cells = <2>; 117 #size-cells = <2>;
108 #interrupt-cells = <1>; 118 #interrupt-cells = <1>;
109 ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0 119 ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0
110 0x81000000 0 0 0x81000000 0x2 0 1 0>; 120 0x81000000 0 0 0x81000000 0x2 0 1 0>;
111 interrupt-map-mask = <0 0 0 0>; 121 interrupt-map-mask = <0 0 0 0>;
112 interrupt-map = <0 0 0 0 &mpic 59>; 122 interrupt-map = <0 0 0 0 &mpic 59>;
113 marvell,pcie-port = <0>; 123 marvell,pcie-port = <0>;
@@ -150,37 +160,88 @@
150 status = "disabled"; 160 status = "disabled";
151 }; 161 };
152 162
153 pcie@9,0 { 163 pcie@5,0 {
154 device_type = "pci"; 164 device_type = "pci";
155 assigned-addresses = <0x82000800 0 0x42000 0 0x2000>; 165 assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
156 reg = <0x4800 0 0 0 0>; 166 reg = <0x2800 0 0 0 0>;
157 #address-cells = <3>; 167 #address-cells = <3>;
158 #size-cells = <2>; 168 #size-cells = <2>;
159 #interrupt-cells = <1>; 169 #interrupt-cells = <1>;
160 ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0 170 ranges = <0x82000000 0 0 0x82000000 0x5 0 1 0
161 0x81000000 0 0 0x81000000 0x9 0 1 0>; 171 0x81000000 0 0 0x81000000 0x5 0 1 0>;
162 interrupt-map-mask = <0 0 0 0>; 172 interrupt-map-mask = <0 0 0 0>;
163 interrupt-map = <0 0 0 0 &mpic 99>; 173 interrupt-map = <0 0 0 0 &mpic 62>;
164 marvell,pcie-port = <2>; 174 marvell,pcie-port = <1>;
165 marvell,pcie-lane = <0>; 175 marvell,pcie-lane = <0>;
166 clocks = <&gateclk 26>; 176 clocks = <&gateclk 9>;
167 status = "disabled"; 177 status = "disabled";
168 }; 178 };
169 179
170 pcie@10,0 { 180 pcie@6,0 {
171 device_type = "pci"; 181 device_type = "pci";
172 assigned-addresses = <0x82000800 0 0x82000 0 0x2000>; 182 assigned-addresses = <0x82000800 0 0x84000 0 0x2000>;
173 reg = <0x5000 0 0 0 0>; 183 reg = <0x3000 0 0 0 0>;
174 #address-cells = <3>; 184 #address-cells = <3>;
175 #size-cells = <2>; 185 #size-cells = <2>;
176 #interrupt-cells = <1>; 186 #interrupt-cells = <1>;
177 ranges = <0x82000000 0 0 0x82000000 0xa 0 1 0 187 ranges = <0x82000000 0 0 0x82000000 0x6 0 1 0
178 0x81000000 0 0 0x81000000 0xa 0 1 0>; 188 0x81000000 0 0 0x81000000 0x6 0 1 0>;
179 interrupt-map-mask = <0 0 0 0>; 189 interrupt-map-mask = <0 0 0 0>;
180 interrupt-map = <0 0 0 0 &mpic 103>; 190 interrupt-map = <0 0 0 0 &mpic 63>;
181 marvell,pcie-port = <3>; 191 marvell,pcie-port = <1>;
192 marvell,pcie-lane = <1>;
193 clocks = <&gateclk 10>;
194 status = "disabled";
195 };
196
197 pcie@7,0 {
198 device_type = "pci";
199 assigned-addresses = <0x82000800 0 0x88000 0 0x2000>;
200 reg = <0x3800 0 0 0 0>;
201 #address-cells = <3>;
202 #size-cells = <2>;
203 #interrupt-cells = <1>;
204 ranges = <0x82000000 0 0 0x82000000 0x7 0 1 0
205 0x81000000 0 0 0x81000000 0x7 0 1 0>;
206 interrupt-map-mask = <0 0 0 0>;
207 interrupt-map = <0 0 0 0 &mpic 64>;
208 marvell,pcie-port = <1>;
209 marvell,pcie-lane = <2>;
210 clocks = <&gateclk 11>;
211 status = "disabled";
212 };
213
214 pcie@8,0 {
215 device_type = "pci";
216 assigned-addresses = <0x82000800 0 0x8c000 0 0x2000>;
217 reg = <0x4000 0 0 0 0>;
218 #address-cells = <3>;
219 #size-cells = <2>;
220 #interrupt-cells = <1>;
221 ranges = <0x82000000 0 0 0x82000000 0x8 0 1 0
222 0x81000000 0 0 0x81000000 0x8 0 1 0>;
223 interrupt-map-mask = <0 0 0 0>;
224 interrupt-map = <0 0 0 0 &mpic 65>;
225 marvell,pcie-port = <1>;
226 marvell,pcie-lane = <3>;
227 clocks = <&gateclk 12>;
228 status = "disabled";
229 };
230
231 pcie@9,0 {
232 device_type = "pci";
233 assigned-addresses = <0x82000800 0 0x42000 0 0x2000>;
234 reg = <0x4800 0 0 0 0>;
235 #address-cells = <3>;
236 #size-cells = <2>;
237 #interrupt-cells = <1>;
238 ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0
239 0x81000000 0 0 0x81000000 0x9 0 1 0>;
240 interrupt-map-mask = <0 0 0 0>;
241 interrupt-map = <0 0 0 0 &mpic 99>;
242 marvell,pcie-port = <2>;
182 marvell,pcie-lane = <0>; 243 marvell,pcie-lane = <0>;
183 clocks = <&gateclk 27>; 244 clocks = <&gateclk 26>;
184 status = "disabled"; 245 status = "disabled";
185 }; 246 };
186 }; 247 };
diff --git a/arch/arm/boot/dts/at91sam9x5_usart3.dtsi b/arch/arm/boot/dts/at91sam9x5_usart3.dtsi
index 2347e9563cef..6801106fa1f8 100644
--- a/arch/arm/boot/dts/at91sam9x5_usart3.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5_usart3.dtsi
@@ -11,6 +11,10 @@
11#include <dt-bindings/interrupt-controller/irq.h> 11#include <dt-bindings/interrupt-controller/irq.h>
12 12
13/ { 13/ {
14 aliases {
15 serial4 = &usart3;
16 };
17
14 ahb { 18 ahb {
15 apb { 19 apb {
16 pinctrl@fffff400 { 20 pinctrl@fffff400 {
diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi
index 1e12aeff403b..aa537ed13f0a 100644
--- a/arch/arm/boot/dts/bcm2835.dtsi
+++ b/arch/arm/boot/dts/bcm2835.dtsi
@@ -85,6 +85,8 @@
85 reg = <0x7e205000 0x1000>; 85 reg = <0x7e205000 0x1000>;
86 interrupts = <2 21>; 86 interrupts = <2 21>;
87 clocks = <&clk_i2c>; 87 clocks = <&clk_i2c>;
88 #address-cells = <1>;
89 #size-cells = <0>;
88 status = "disabled"; 90 status = "disabled";
89 }; 91 };
90 92
@@ -93,6 +95,8 @@
93 reg = <0x7e804000 0x1000>; 95 reg = <0x7e804000 0x1000>;
94 interrupts = <2 21>; 96 interrupts = <2 21>;
95 clocks = <&clk_i2c>; 97 clocks = <&clk_i2c>;
98 #address-cells = <1>;
99 #size-cells = <0>;
96 status = "disabled"; 100 status = "disabled";
97 }; 101 };
98 102
diff --git a/arch/arm/boot/dts/cros5250-common.dtsi b/arch/arm/boot/dts/cros5250-common.dtsi
index dc259e8b8a73..9b186ac06c8b 100644
--- a/arch/arm/boot/dts/cros5250-common.dtsi
+++ b/arch/arm/boot/dts/cros5250-common.dtsi
@@ -27,6 +27,13 @@
27 i2c2_bus: i2c2-bus { 27 i2c2_bus: i2c2-bus {
28 samsung,pin-pud = <0>; 28 samsung,pin-pud = <0>;
29 }; 29 };
30
31 max77686_irq: max77686-irq {
32 samsung,pins = "gpx3-2";
33 samsung,pin-function = <0>;
34 samsung,pin-pud = <0>;
35 samsung,pin-drv = <0>;
36 };
30 }; 37 };
31 38
32 i2c@12C60000 { 39 i2c@12C60000 {
@@ -35,6 +42,11 @@
35 42
36 max77686@09 { 43 max77686@09 {
37 compatible = "maxim,max77686"; 44 compatible = "maxim,max77686";
45 interrupt-parent = <&gpx3>;
46 interrupts = <2 0>;
47 pinctrl-names = "default";
48 pinctrl-0 = <&max77686_irq>;
49 wakeup-source;
38 reg = <0x09>; 50 reg = <0x09>;
39 51
40 voltage-regulators { 52 voltage-regulators {
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 59154dc15fe4..fb28b2ecb1db 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -161,7 +161,7 @@
161 clocks = <&clks 197>, <&clks 3>, 161 clocks = <&clks 197>, <&clks 3>,
162 <&clks 197>, <&clks 107>, 162 <&clks 197>, <&clks 107>,
163 <&clks 0>, <&clks 118>, 163 <&clks 0>, <&clks 118>,
164 <&clks 62>, <&clks 139>, 164 <&clks 0>, <&clks 139>,
165 <&clks 0>; 165 <&clks 0>;
166 clock-names = "core", "rxtx0", 166 clock-names = "core", "rxtx0",
167 "rxtx1", "rxtx2", 167 "rxtx1", "rxtx2",
diff --git a/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi b/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
index 9c18adf788f7..f577b7df9a29 100644
--- a/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
+++ b/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
@@ -44,8 +44,8 @@
44 gpmc,wr-access-ns = <186>; 44 gpmc,wr-access-ns = <186>;
45 gpmc,cycle2cycle-samecsen; 45 gpmc,cycle2cycle-samecsen;
46 gpmc,cycle2cycle-diffcsen; 46 gpmc,cycle2cycle-diffcsen;
47 vmmc-supply = <&vddvario>; 47 vddvario-supply = <&vddvario>;
48 vmmc_aux-supply = <&vdd33a>; 48 vdd33a-supply = <&vdd33a>;
49 reg-io-width = <4>; 49 reg-io-width = <4>;
50 smsc,save-mac-address; 50 smsc,save-mac-address;
51 }; 51 };
diff --git a/arch/arm/boot/dts/omap-zoom-common.dtsi b/arch/arm/boot/dts/omap-zoom-common.dtsi
index b0ee342598f0..68221fab978d 100644
--- a/arch/arm/boot/dts/omap-zoom-common.dtsi
+++ b/arch/arm/boot/dts/omap-zoom-common.dtsi
@@ -13,7 +13,7 @@
13 * they probably share the same GPIO IRQ 13 * they probably share the same GPIO IRQ
14 * REVISIT: Add timing support from slls644g.pdf 14 * REVISIT: Add timing support from slls644g.pdf
15 */ 15 */
16 8250@3,0 { 16 uart@3,0 {
17 compatible = "ns16550a"; 17 compatible = "ns16550a";
18 reg = <3 0 0x100>; 18 reg = <3 0 0x100>;
19 bank-width = <2>; 19 bank-width = <2>;
diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi
index a2bfcde858a6..d0c5b37e248c 100644
--- a/arch/arm/boot/dts/omap2.dtsi
+++ b/arch/arm/boot/dts/omap2.dtsi
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <dt-bindings/gpio/gpio.h> 11#include <dt-bindings/gpio/gpio.h>
12#include <dt-bindings/interrupt-controller/irq.h>
12#include <dt-bindings/pinctrl/omap.h> 13#include <dt-bindings/pinctrl/omap.h>
13 14
14#include "skeleton.dtsi" 15#include "skeleton.dtsi"
@@ -21,6 +22,8 @@
21 serial0 = &uart1; 22 serial0 = &uart1;
22 serial1 = &uart2; 23 serial1 = &uart2;
23 serial2 = &uart3; 24 serial2 = &uart3;
25 i2c0 = &i2c1;
26 i2c1 = &i2c2;
24 }; 27 };
25 28
26 cpus { 29 cpus {
@@ -53,6 +56,28 @@
53 ranges; 56 ranges;
54 ti,hwmods = "l3_main"; 57 ti,hwmods = "l3_main";
55 58
59 aes: aes@480a6000 {
60 compatible = "ti,omap2-aes";
61 ti,hwmods = "aes";
62 reg = <0x480a6000 0x50>;
63 dmas = <&sdma 9 &sdma 10>;
64 dma-names = "tx", "rx";
65 };
66
67 hdq1w: 1w@480b2000 {
68 compatible = "ti,omap2420-1w";
69 ti,hwmods = "hdq1w";
70 reg = <0x480b2000 0x1000>;
71 interrupts = <58>;
72 };
73
74 mailbox: mailbox@48094000 {
75 compatible = "ti,omap2-mailbox";
76 ti,hwmods = "mailbox";
77 reg = <0x48094000 0x200>;
78 interrupts = <26>;
79 };
80
56 intc: interrupt-controller@1 { 81 intc: interrupt-controller@1 {
57 compatible = "ti,omap2-intc"; 82 compatible = "ti,omap2-intc";
58 interrupt-controller; 83 interrupt-controller;
@@ -63,6 +88,7 @@
63 88
64 sdma: dma-controller@48056000 { 89 sdma: dma-controller@48056000 {
65 compatible = "ti,omap2430-sdma", "ti,omap2420-sdma"; 90 compatible = "ti,omap2430-sdma", "ti,omap2420-sdma";
91 ti,hwmods = "dma";
66 reg = <0x48056000 0x1000>; 92 reg = <0x48056000 0x1000>;
67 interrupts = <12>, 93 interrupts = <12>,
68 <13>, 94 <13>,
@@ -73,21 +99,91 @@
73 #dma-requests = <64>; 99 #dma-requests = <64>;
74 }; 100 };
75 101
102 i2c1: i2c@48070000 {
103 compatible = "ti,omap2-i2c";
104 ti,hwmods = "i2c1";
105 reg = <0x48070000 0x80>;
106 #address-cells = <1>;
107 #size-cells = <0>;
108 interrupts = <56>;
109 dmas = <&sdma 27 &sdma 28>;
110 dma-names = "tx", "rx";
111 };
112
113 i2c2: i2c@48072000 {
114 compatible = "ti,omap2-i2c";
115 ti,hwmods = "i2c2";
116 reg = <0x48072000 0x80>;
117 #address-cells = <1>;
118 #size-cells = <0>;
119 interrupts = <57>;
120 dmas = <&sdma 29 &sdma 30>;
121 dma-names = "tx", "rx";
122 };
123
124 mcspi1: mcspi@48098000 {
125 compatible = "ti,omap2-mcspi";
126 ti,hwmods = "mcspi1";
127 reg = <0x48098000 0x100>;
128 interrupts = <65>;
129 dmas = <&sdma 35 &sdma 36 &sdma 37 &sdma 38
130 &sdma 39 &sdma 40 &sdma 41 &sdma 42>;
131 dma-names = "tx0", "rx0", "tx1", "rx1",
132 "tx2", "rx2", "tx3", "rx3";
133 };
134
135 mcspi2: mcspi@4809a000 {
136 compatible = "ti,omap2-mcspi";
137 ti,hwmods = "mcspi2";
138 reg = <0x4809a000 0x100>;
139 interrupts = <66>;
140 dmas = <&sdma 43 &sdma 44 &sdma 45 &sdma 46>;
141 dma-names = "tx0", "rx0", "tx1", "rx1";
142 };
143
144 rng: rng@480a0000 {
145 compatible = "ti,omap2-rng";
146 ti,hwmods = "rng";
147 reg = <0x480a0000 0x50>;
148 interrupts = <36>;
149 };
150
151 sham: sham@480a4000 {
152 compatible = "ti,omap2-sham";
153 ti,hwmods = "sham";
154 reg = <0x480a4000 0x64>;
155 interrupts = <51>;
156 dmas = <&sdma 13>;
157 dma-names = "rx";
158 };
159
76 uart1: serial@4806a000 { 160 uart1: serial@4806a000 {
77 compatible = "ti,omap2-uart"; 161 compatible = "ti,omap2-uart";
78 ti,hwmods = "uart1"; 162 ti,hwmods = "uart1";
163 reg = <0x4806a000 0x2000>;
164 interrupts = <72>;
165 dmas = <&sdma 49 &sdma 50>;
166 dma-names = "tx", "rx";
79 clock-frequency = <48000000>; 167 clock-frequency = <48000000>;
80 }; 168 };
81 169
82 uart2: serial@4806c000 { 170 uart2: serial@4806c000 {
83 compatible = "ti,omap2-uart"; 171 compatible = "ti,omap2-uart";
84 ti,hwmods = "uart2"; 172 ti,hwmods = "uart2";
173 reg = <0x4806c000 0x400>;
174 interrupts = <73>;
175 dmas = <&sdma 51 &sdma 52>;
176 dma-names = "tx", "rx";
85 clock-frequency = <48000000>; 177 clock-frequency = <48000000>;
86 }; 178 };
87 179
88 uart3: serial@4806e000 { 180 uart3: serial@4806e000 {
89 compatible = "ti,omap2-uart"; 181 compatible = "ti,omap2-uart";
90 ti,hwmods = "uart3"; 182 ti,hwmods = "uart3";
183 reg = <0x4806e000 0x400>;
184 interrupts = <74>;
185 dmas = <&sdma 53 &sdma 54>;
186 dma-names = "tx", "rx";
91 clock-frequency = <48000000>; 187 clock-frequency = <48000000>;
92 }; 188 };
93 189
diff --git a/arch/arm/boot/dts/omap2420.dtsi b/arch/arm/boot/dts/omap2420.dtsi
index c8f9c55169ea..60c605de22dd 100644
--- a/arch/arm/boot/dts/omap2420.dtsi
+++ b/arch/arm/boot/dts/omap2420.dtsi
@@ -114,6 +114,15 @@
114 dma-names = "tx", "rx"; 114 dma-names = "tx", "rx";
115 }; 115 };
116 116
117 msdi1: mmc@4809c000 {
118 compatible = "ti,omap2420-mmc";
119 ti,hwmods = "msdi1";
120 reg = <0x4809c000 0x80>;
121 interrupts = <83>;
122 dmas = <&sdma 61 &sdma 62>;
123 dma-names = "tx", "rx";
124 };
125
117 timer1: timer@48028000 { 126 timer1: timer@48028000 {
118 compatible = "ti,omap2420-timer"; 127 compatible = "ti,omap2420-timer";
119 reg = <0x48028000 0x400>; 128 reg = <0x48028000 0x400>;
@@ -121,5 +130,19 @@
121 ti,hwmods = "timer1"; 130 ti,hwmods = "timer1";
122 ti,timer-alwon; 131 ti,timer-alwon;
123 }; 132 };
133
134 wd_timer2: wdt@48022000 {
135 compatible = "ti,omap2-wdt";
136 ti,hwmods = "wd_timer2";
137 reg = <0x48022000 0x80>;
138 };
124 }; 139 };
125}; 140};
141
142&i2c1 {
143 compatible = "ti,omap2420-i2c";
144};
145
146&i2c2 {
147 compatible = "ti,omap2420-i2c";
148};
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index c535a5a2b27f..d624345666f5 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -175,6 +175,25 @@
175 dma-names = "tx", "rx"; 175 dma-names = "tx", "rx";
176 }; 176 };
177 177
178 mmc1: mmc@4809c000 {
179 compatible = "ti,omap2-hsmmc";
180 reg = <0x4809c000 0x200>;
181 interrupts = <83>;
182 ti,hwmods = "mmc1";
183 ti,dual-volt;
184 dmas = <&sdma 61>, <&sdma 62>;
185 dma-names = "tx", "rx";
186 };
187
188 mmc2: mmc@480b4000 {
189 compatible = "ti,omap2-hsmmc";
190 reg = <0x480b4000 0x200>;
191 interrupts = <86>;
192 ti,hwmods = "mmc2";
193 dmas = <&sdma 47>, <&sdma 48>;
194 dma-names = "tx", "rx";
195 };
196
178 timer1: timer@49018000 { 197 timer1: timer@49018000 {
179 compatible = "ti,omap2420-timer"; 198 compatible = "ti,omap2420-timer";
180 reg = <0x49018000 0x400>; 199 reg = <0x49018000 0x400>;
@@ -182,5 +201,35 @@
182 ti,hwmods = "timer1"; 201 ti,hwmods = "timer1";
183 ti,timer-alwon; 202 ti,timer-alwon;
184 }; 203 };
204
205 mcspi3: mcspi@480b8000 {
206 compatible = "ti,omap2-mcspi";
207 ti,hwmods = "mcspi3";
208 reg = <0x480b8000 0x100>;
209 interrupts = <91>;
210 dmas = <&sdma 15 &sdma 16 &sdma 23 &sdma 24>;
211 dma-names = "tx0", "rx0", "tx1", "rx1";
212 };
213
214 usb_otg_hs: usb_otg_hs@480ac000 {
215 compatible = "ti,omap2-musb";
216 ti,hwmods = "usb_otg_hs";
217 reg = <0x480ac000 0x1000>;
218 interrupts = <93>;
219 };
220
221 wd_timer2: wdt@49016000 {
222 compatible = "ti,omap2-wdt";
223 ti,hwmods = "wd_timer2";
224 reg = <0x49016000 0x80>;
225 };
185 }; 226 };
186}; 227};
228
229&i2c1 {
230 compatible = "ti,omap2430-i2c";
231};
232
233&i2c2 {
234 compatible = "ti,omap2430-i2c";
235};
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index 31a632f7effb..df33a50bc070 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -215,3 +215,10 @@
215&usbhsehci { 215&usbhsehci {
216 phys = <0 &hsusb2_phy>; 216 phys = <0 &hsusb2_phy>;
217}; 217};
218
219&vaux2 {
220 regulator-name = "usb_1v8";
221 regulator-min-microvolt = <1800000>;
222 regulator-max-microvolt = <1800000>;
223 regulator-always-on;
224};
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index fa532aaacc68..3ba4a625ea5b 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -61,6 +61,14 @@
61 vcc-supply = <&hsusb2_power>; 61 vcc-supply = <&hsusb2_power>;
62 }; 62 };
63 63
64 sound {
65 compatible = "ti,omap-twl4030";
66 ti,model = "omap3beagle";
67
68 ti,mcbsp = <&mcbsp2>;
69 ti,codec = <&twl_audio>;
70 };
71
64 gpio_keys { 72 gpio_keys {
65 compatible = "gpio-keys"; 73 compatible = "gpio-keys";
66 74
@@ -120,6 +128,12 @@
120 reg = <0x48>; 128 reg = <0x48>;
121 interrupts = <7>; /* SYS_NIRQ cascaded to intc */ 129 interrupts = <7>; /* SYS_NIRQ cascaded to intc */
122 interrupt-parent = <&intc>; 130 interrupt-parent = <&intc>;
131
132 twl_audio: audio {
133 compatible = "ti,twl4030-audio";
134 codec {
135 };
136 };
123 }; 137 };
124}; 138};
125 139
@@ -178,3 +192,10 @@
178 mode = <3>; 192 mode = <3>;
179 power = <50>; 193 power = <50>;
180}; 194};
195
196&vaux2 {
197 regulator-name = "vdd_ehci";
198 regulator-min-microvolt = <1800000>;
199 regulator-max-microvolt = <1800000>;
200 regulator-always-on;
201};
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index ba1e58b7b7e3..165aaf7591ba 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -1,5 +1,5 @@
1/* 1/*
2 * Device Tree Source for IGEP Technology devices 2 * Common device tree for IGEP boards based on AM/DM37x
3 * 3 *
4 * Copyright (C) 2012 Javier Martinez Canillas <javier@collabora.co.uk> 4 * Copyright (C) 2012 Javier Martinez Canillas <javier@collabora.co.uk>
5 * Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com> 5 * Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com>
@@ -10,7 +10,7 @@
10 */ 10 */
11/dts-v1/; 11/dts-v1/;
12 12
13#include "omap34xx.dtsi" 13#include "omap36xx.dtsi"
14 14
15/ { 15/ {
16 memory { 16 memory {
@@ -24,6 +24,25 @@
24 ti,mcbsp = <&mcbsp2>; 24 ti,mcbsp = <&mcbsp2>;
25 ti,codec = <&twl_audio>; 25 ti,codec = <&twl_audio>;
26 }; 26 };
27
28 vdd33: regulator-vdd33 {
29 compatible = "regulator-fixed";
30 regulator-name = "vdd33";
31 regulator-always-on;
32 };
33
34 lbee1usjyc_vmmc: lbee1usjyc_vmmc {
35 pinctrl-names = "default";
36 pinctrl-0 = <&lbee1usjyc_pins>;
37 compatible = "regulator-fixed";
38 regulator-name = "regulator-lbee1usjyc";
39 regulator-min-microvolt = <3300000>;
40 regulator-max-microvolt = <3300000>;
41 gpio = <&gpio5 10 GPIO_ACTIVE_HIGH>; /* gpio_138 WIFI_PDN */
42 startup-delay-us = <10000>;
43 enable-active-high;
44 vin-supply = <&vdd33>;
45 };
27}; 46};
28 47
29&omap3_pmx_core { 48&omap3_pmx_core {
@@ -48,6 +67,15 @@
48 >; 67 >;
49 }; 68 };
50 69
70 /* WiFi/BT combo */
71 lbee1usjyc_pins: pinmux_lbee1usjyc_pins {
72 pinctrl-single,pins = <
73 0x136 (PIN_OUTPUT | MUX_MODE4) /* sdmmc2_dat5.gpio_137 */
74 0x138 (PIN_OUTPUT | MUX_MODE4) /* sdmmc2_dat6.gpio_138 */
75 0x13a (PIN_OUTPUT | MUX_MODE4) /* sdmmc2_dat7.gpio_139 */
76 >;
77 };
78
51 mcbsp2_pins: pinmux_mcbsp2_pins { 79 mcbsp2_pins: pinmux_mcbsp2_pins {
52 pinctrl-single,pins = < 80 pinctrl-single,pins = <
53 0x10c (PIN_INPUT | MUX_MODE0) /* mcbsp2_fsx.mcbsp2_fsx */ 81 0x10c (PIN_INPUT | MUX_MODE0) /* mcbsp2_fsx.mcbsp2_fsx */
@@ -65,10 +93,17 @@
65 0x11a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */ 93 0x11a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */
66 0x11c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */ 94 0x11c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */
67 0x11e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */ 95 0x11e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */
68 0x120 (PIN_INPUT | MUX_MODE0) /* sdmmc1_dat4.sdmmc1_dat4 */ 96 >;
69 0x122 (PIN_INPUT | MUX_MODE0) /* sdmmc1_dat5.sdmmc1_dat5 */ 97 };
70 0x124 (PIN_INPUT | MUX_MODE0) /* sdmmc1_dat6.sdmmc1_dat6 */ 98
71 0x126 (PIN_INPUT | MUX_MODE0) /* sdmmc1_dat7.sdmmc1_dat7 */ 99 mmc2_pins: pinmux_mmc2_pins {
100 pinctrl-single,pins = <
101 0x128 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk.sdmmc2_clk */
102 0x12a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd.sdmmc2_cmd */
103 0x12c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0.sdmmc2_dat0 */
104 0x12e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
105 0x130 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2.sdmmc2_dat2 */
106 0x132 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3.sdmmc2_dat3 */
72 >; 107 >;
73 }; 108 };
74 109
@@ -78,10 +113,33 @@
78 >; 113 >;
79 }; 114 };
80 115
116 i2c1_pins: pinmux_i2c1_pins {
117 pinctrl-single,pins = <
118 0x18a (PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
119 0x18c (PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
120 >;
121 };
122
123 i2c2_pins: pinmux_i2c2_pins {
124 pinctrl-single,pins = <
125 0x18e (PIN_INPUT | MUX_MODE0) /* i2c2_scl.i2c2_scl */
126 0x190 (PIN_INPUT | MUX_MODE0) /* i2c2_sda.i2c2_sda */
127 >;
128 };
129
130 i2c3_pins: pinmux_i2c3_pins {
131 pinctrl-single,pins = <
132 0x192 (PIN_INPUT | MUX_MODE0) /* i2c3_scl.i2c3_scl */
133 0x194 (PIN_INPUT | MUX_MODE0) /* i2c3_sda.i2c3_sda */
134 >;
135 };
136
81 leds_pins: pinmux_leds_pins { }; 137 leds_pins: pinmux_leds_pins { };
82}; 138};
83 139
84&i2c1 { 140&i2c1 {
141 pinctrl-names = "default";
142 pinctrl-0 = <&i2c1_pins>;
85 clock-frequency = <2600000>; 143 clock-frequency = <2600000>;
86 144
87 twl: twl@48 { 145 twl: twl@48 {
@@ -101,9 +159,16 @@
101#include "twl4030_omap3.dtsi" 159#include "twl4030_omap3.dtsi"
102 160
103&i2c2 { 161&i2c2 {
162 pinctrl-names = "default";
163 pinctrl-0 = <&i2c2_pins>;
104 clock-frequency = <400000>; 164 clock-frequency = <400000>;
105}; 165};
106 166
167&i2c3 {
168 pinctrl-names = "default";
169 pinctrl-0 = <&i2c3_pins>;
170};
171
107&mcbsp2 { 172&mcbsp2 {
108 pinctrl-names = "default"; 173 pinctrl-names = "default";
109 pinctrl-0 = <&mcbsp2_pins>; 174 pinctrl-0 = <&mcbsp2_pins>;
@@ -114,11 +179,15 @@
114 pinctrl-0 = <&mmc1_pins>; 179 pinctrl-0 = <&mmc1_pins>;
115 vmmc-supply = <&vmmc1>; 180 vmmc-supply = <&vmmc1>;
116 vmmc_aux-supply = <&vsim>; 181 vmmc_aux-supply = <&vsim>;
117 bus-width = <8>; 182 bus-width = <4>;
118}; 183};
119 184
120&mmc2 { 185&mmc2 {
121 status = "disabled"; 186 pinctrl-names = "default";
187 pinctrl-0 = <&mmc2_pins>;
188 vmmc-supply = <&lbee1usjyc_vmmc>;
189 bus-width = <4>;
190 non-removable;
122}; 191};
123 192
124&mmc3 { 193&mmc3 {
diff --git a/arch/arm/boot/dts/omap3-igep0020.dts b/arch/arm/boot/dts/omap3-igep0020.dts
index d5cc79267250..1c7e74d2d2bc 100644
--- a/arch/arm/boot/dts/omap3-igep0020.dts
+++ b/arch/arm/boot/dts/omap3-igep0020.dts
@@ -1,5 +1,5 @@
1/* 1/*
2 * Device Tree Source for IGEPv2 board 2 * Device Tree Source for IGEPv2 Rev. (TI OMAP AM/DM37x)
3 * 3 *
4 * Copyright (C) 2012 Javier Martinez Canillas <javier@collabora.co.uk> 4 * Copyright (C) 2012 Javier Martinez Canillas <javier@collabora.co.uk>
5 * Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com> 5 * Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com>
@@ -13,7 +13,7 @@
13#include "omap-gpmc-smsc911x.dtsi" 13#include "omap-gpmc-smsc911x.dtsi"
14 14
15/ { 15/ {
16 model = "IGEPv2"; 16 model = "IGEPv2 (TI OMAP AM/DM37x)";
17 compatible = "isee,omap3-igep0020", "ti,omap3"; 17 compatible = "isee,omap3-igep0020", "ti,omap3";
18 18
19 leds { 19 leds {
@@ -67,6 +67,8 @@
67 pinctrl-names = "default"; 67 pinctrl-names = "default";
68 pinctrl-0 = < 68 pinctrl-0 = <
69 &hsusbb1_pins 69 &hsusbb1_pins
70 &tfp410_pins
71 &dss_pins
70 >; 72 >;
71 73
72 hsusbb1_pins: pinmux_hsusbb1_pins { 74 hsusbb1_pins: pinmux_hsusbb1_pins {
@@ -85,6 +87,45 @@
85 0x5ba (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d7.hsusb1_data3 */ 87 0x5ba (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d7.hsusb1_data3 */
86 >; 88 >;
87 }; 89 };
90
91 tfp410_pins: tfp410_dvi_pins {
92 pinctrl-single,pins = <
93 0x196 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */
94 >;
95 };
96
97 dss_pins: pinmux_dss_dvi_pins {
98 pinctrl-single,pins = <
99 0x0a4 (PIN_OUTPUT | MUX_MODE0) /* dss_pclk.dss_pclk */
100 0x0a6 (PIN_OUTPUT | MUX_MODE0) /* dss_hsync.dss_hsync */
101 0x0a8 (PIN_OUTPUT | MUX_MODE0) /* dss_vsync.dss_vsync */
102 0x0aa (PIN_OUTPUT | MUX_MODE0) /* dss_acbias.dss_acbias */
103 0x0ac (PIN_OUTPUT | MUX_MODE0) /* dss_data0.dss_data0 */
104 0x0ae (PIN_OUTPUT | MUX_MODE0) /* dss_data1.dss_data1 */
105 0x0b0 (PIN_OUTPUT | MUX_MODE0) /* dss_data2.dss_data2 */
106 0x0b2 (PIN_OUTPUT | MUX_MODE0) /* dss_data3.dss_data3 */
107 0x0b4 (PIN_OUTPUT | MUX_MODE0) /* dss_data4.dss_data4 */
108 0x0b6 (PIN_OUTPUT | MUX_MODE0) /* dss_data5.dss_data5 */
109 0x0b8 (PIN_OUTPUT | MUX_MODE0) /* dss_data6.dss_data6 */
110 0x0ba (PIN_OUTPUT | MUX_MODE0) /* dss_data7.dss_data7 */
111 0x0bc (PIN_OUTPUT | MUX_MODE0) /* dss_data8.dss_data8 */
112 0x0be (PIN_OUTPUT | MUX_MODE0) /* dss_data9.dss_data9 */
113 0x0c0 (PIN_OUTPUT | MUX_MODE0) /* dss_data10.dss_data10 */
114 0x0c2 (PIN_OUTPUT | MUX_MODE0) /* dss_data11.dss_data11 */
115 0x0c4 (PIN_OUTPUT | MUX_MODE0) /* dss_data12.dss_data12 */
116 0x0c6 (PIN_OUTPUT | MUX_MODE0) /* dss_data13.dss_data13 */
117 0x0c8 (PIN_OUTPUT | MUX_MODE0) /* dss_data14.dss_data14 */
118 0x0ca (PIN_OUTPUT | MUX_MODE0) /* dss_data15.dss_data15 */
119 0x0cc (PIN_OUTPUT | MUX_MODE0) /* dss_data16.dss_data16 */
120 0x0ce (PIN_OUTPUT | MUX_MODE0) /* dss_data17.dss_data17 */
121 0x0d0 (PIN_OUTPUT | MUX_MODE0) /* dss_data18.dss_data18 */
122 0x0d2 (PIN_OUTPUT | MUX_MODE0) /* dss_data19.dss_data19 */
123 0x0d4 (PIN_OUTPUT | MUX_MODE0) /* dss_data20.dss_data20 */
124 0x0d6 (PIN_OUTPUT | MUX_MODE0) /* dss_data21.dss_data21 */
125 0x0d8 (PIN_OUTPUT | MUX_MODE0) /* dss_data22.dss_data22 */
126 0x0da (PIN_OUTPUT | MUX_MODE0) /* dss_data23.dss_data23 */
127 >;
128 };
88}; 129};
89 130
90&leds_pins { 131&leds_pins {
@@ -174,3 +215,8 @@
174&usbhsehci { 215&usbhsehci {
175 phys = <&hsusb1_phy>; 216 phys = <&hsusb1_phy>;
176}; 217};
218
219&vpll2 {
220 /* Needed for DSS */
221 regulator-name = "vdds_dsi";
222};
diff --git a/arch/arm/boot/dts/omap3-igep0030.dts b/arch/arm/boot/dts/omap3-igep0030.dts
index 525e6d9b0978..02a23f8a3384 100644
--- a/arch/arm/boot/dts/omap3-igep0030.dts
+++ b/arch/arm/boot/dts/omap3-igep0030.dts
@@ -1,5 +1,5 @@
1/* 1/*
2 * Device Tree Source for IGEP COM Module 2 * Device Tree Source for IGEP COM MODULE (TI OMAP AM/DM37x)
3 * 3 *
4 * Copyright (C) 2012 Javier Martinez Canillas <javier@collabora.co.uk> 4 * Copyright (C) 2012 Javier Martinez Canillas <javier@collabora.co.uk>
5 * Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com> 5 * Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com>
@@ -12,7 +12,7 @@
12#include "omap3-igep.dtsi" 12#include "omap3-igep.dtsi"
13 13
14/ { 14/ {
15 model = "IGEP COM Module"; 15 model = "IGEP COM MODULE (TI OMAP AM/DM37x)";
16 compatible = "isee,omap3-igep0030", "ti,omap3"; 16 compatible = "isee,omap3-igep0030", "ti,omap3";
17 17
18 leds { 18 leds {
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index c4f20bfe4cce..6fc85f963530 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -9,7 +9,7 @@
9 9
10/dts-v1/; 10/dts-v1/;
11 11
12#include "omap34xx.dtsi" 12#include "omap34xx-hs.dtsi"
13 13
14/ { 14/ {
15 model = "Nokia N900"; 15 model = "Nokia N900";
@@ -125,6 +125,21 @@
125 >; 125 >;
126 }; 126 };
127 127
128 mmc2_pins: pinmux_mmc2_pins {
129 pinctrl-single,pins = <
130 0x128 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk */
131 0x12a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd */
132 0x12c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0 */
133 0x12e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1 */
134 0x130 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2 */
135 0x132 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3 */
136 0x134 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat4 */
137 0x136 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat5 */
138 0x138 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat6 */
139 0x13a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat7 */
140 >;
141 };
142
128 display_pins: pinmux_display_pins { 143 display_pins: pinmux_display_pins {
129 pinctrl-single,pins = < 144 pinctrl-single,pins = <
130 0x0d4 (PIN_OUTPUT | MUX_MODE4) /* RX51_LCD_RESET_GPIO */ 145 0x0d4 (PIN_OUTPUT | MUX_MODE4) /* RX51_LCD_RESET_GPIO */
@@ -358,8 +373,14 @@
358 cd-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* 160 */ 373 cd-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* 160 */
359}; 374};
360 375
376/* most boards use vaux3, only some old versions use vmmc2 instead */
361&mmc2 { 377&mmc2 {
362 status = "disabled"; 378 pinctrl-names = "default";
379 pinctrl-0 = <&mmc2_pins>;
380 vmmc-supply = <&vaux3>;
381 vmmc_aux-supply = <&vsim>;
382 bus-width = <8>;
383 non-removable;
363}; 384};
364 385
365&mmc3 { 386&mmc3 {
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index 94eb77d3b9dd..5c26c184f2c1 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -8,7 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include "omap36xx.dtsi" 11#include "omap36xx-hs.dtsi"
12 12
13/ { 13/ {
14 cpus { 14 cpus {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index f3a0c26ed0c2..daabf99d402a 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -82,6 +82,13 @@
82 ranges; 82 ranges;
83 ti,hwmods = "l3_main"; 83 ti,hwmods = "l3_main";
84 84
85 aes: aes@480c5000 {
86 compatible = "ti,omap3-aes";
87 ti,hwmods = "aes";
88 reg = <0x480c5000 0x50>;
89 interrupts = <0>;
90 };
91
85 counter32k: counter@48320000 { 92 counter32k: counter@48320000 {
86 compatible = "ti,omap-counter32k"; 93 compatible = "ti,omap-counter32k";
87 reg = <0x48320000 0x20>; 94 reg = <0x48320000 0x20>;
@@ -260,6 +267,13 @@
260 ti,hwmods = "i2c3"; 267 ti,hwmods = "i2c3";
261 }; 268 };
262 269
270 mailbox: mailbox@48094000 {
271 compatible = "ti,omap3-mailbox";
272 ti,hwmods = "mailbox";
273 reg = <0x48094000 0x200>;
274 interrupts = <26>;
275 };
276
263 mcspi1: spi@48098000 { 277 mcspi1: spi@48098000 {
264 compatible = "ti,omap2-mcspi"; 278 compatible = "ti,omap2-mcspi";
265 reg = <0x48098000 0x100>; 279 reg = <0x48098000 0x100>;
@@ -357,6 +371,13 @@
357 dma-names = "tx", "rx"; 371 dma-names = "tx", "rx";
358 }; 372 };
359 373
374 mmu_isp: mmu@480bd400 {
375 compatible = "ti,omap3-mmu-isp";
376 ti,hwmods = "mmu_isp";
377 reg = <0x480bd400 0x80>;
378 interrupts = <8>;
379 };
380
360 wdt2: wdt@48314000 { 381 wdt2: wdt@48314000 {
361 compatible = "ti,omap3-wdt"; 382 compatible = "ti,omap3-wdt";
362 reg = <0x48314000 0x80>; 383 reg = <0x48314000 0x80>;
@@ -442,6 +463,27 @@
442 dma-names = "tx", "rx"; 463 dma-names = "tx", "rx";
443 }; 464 };
444 465
466 sham: sham@480c3000 {
467 compatible = "ti,omap3-sham";
468 ti,hwmods = "sham";
469 reg = <0x480c3000 0x64>;
470 interrupts = <49>;
471 };
472
473 smartreflex_core: smartreflex@480cb000 {
474 compatible = "ti,omap3-smartreflex-core";
475 ti,hwmods = "smartreflex_core";
476 reg = <0x480cb000 0x400>;
477 interrupts = <19>;
478 };
479
480 smartreflex_mpu_iva: smartreflex@480c9000 {
481 compatible = "ti,omap3-smartreflex-iva";
482 ti,hwmods = "smartreflex_mpu_iva";
483 reg = <0x480c9000 0x400>;
484 interrupts = <18>;
485 };
486
445 timer1: timer@48318000 { 487 timer1: timer@48318000 {
446 compatible = "ti,omap3430-timer"; 488 compatible = "ti,omap3430-timer";
447 reg = <0x48318000 0x400>; 489 reg = <0x48318000 0x400>;
diff --git a/arch/arm/boot/dts/omap34xx-hs.dtsi b/arch/arm/boot/dts/omap34xx-hs.dtsi
new file mode 100644
index 000000000000..1ff626489546
--- /dev/null
+++ b/arch/arm/boot/dts/omap34xx-hs.dtsi
@@ -0,0 +1,16 @@
1/* Disabled modules for secure omaps */
2
3#include "omap34xx.dtsi"
4
5/* Secure omaps have some devices inaccessible depending on the firmware */
6&aes {
7 status = "disabled";
8};
9
10&sham {
11 status = "disabled";
12};
13
14&timer12 {
15 status = "disabled";
16};
diff --git a/arch/arm/boot/dts/omap36xx-hs.dtsi b/arch/arm/boot/dts/omap36xx-hs.dtsi
new file mode 100644
index 000000000000..2c7febb0e016
--- /dev/null
+++ b/arch/arm/boot/dts/omap36xx-hs.dtsi
@@ -0,0 +1,16 @@
1/* Disabled modules for secure omaps */
2
3#include "omap36xx.dtsi"
4
5/* Secure omaps have some devices inaccessible depending on the firmware */
6&aes {
7 status = "disabled";
8};
9
10&sham {
11 status = "disabled";
12};
13
14&timer12 {
15 status = "disabled";
16};
diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
index 298e85020e1b..88c6a05cab41 100644
--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
+++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
@@ -246,15 +246,6 @@
246 0xf0 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c4_sda */ 246 0xf0 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c4_sda */
247 >; 247 >;
248 }; 248 };
249};
250
251&omap4_pmx_wkup {
252 led_wkgpio_pins: pinmux_leds_wkpins {
253 pinctrl-single,pins = <
254 0x1a (PIN_OUTPUT | MUX_MODE3) /* gpio_wk7 */
255 0x1c (PIN_OUTPUT | MUX_MODE3) /* gpio_wk8 */
256 >;
257 };
258 249
259 /* 250 /*
260 * wl12xx GPIO outputs for WLAN_EN, BT_EN, FM_EN, BT_WAKEUP 251 * wl12xx GPIO outputs for WLAN_EN, BT_EN, FM_EN, BT_WAKEUP
@@ -274,7 +265,7 @@
274 pinctrl-single,pins = < 265 pinctrl-single,pins = <
275 0x38 (PIN_INPUT | MUX_MODE3) /* gpmc_ncs2.gpio_52 */ 266 0x38 (PIN_INPUT | MUX_MODE3) /* gpmc_ncs2.gpio_52 */
276 0x3a (PIN_INPUT | MUX_MODE3) /* gpmc_ncs3.gpio_53 */ 267 0x3a (PIN_INPUT | MUX_MODE3) /* gpmc_ncs3.gpio_53 */
277 0x108 (PIN_OUTPUT | MUX_MODE0) /* sdmmc5_clk.sdmmc5_clk */ 268 0x108 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_clk.sdmmc5_clk */
278 0x10a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_cmd.sdmmc5_cmd */ 269 0x10a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_cmd.sdmmc5_cmd */
279 0x10c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat0.sdmmc5_dat0 */ 270 0x10c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat0.sdmmc5_dat0 */
280 0x10e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat1.sdmmc5_dat1 */ 271 0x10e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat1.sdmmc5_dat1 */
@@ -284,6 +275,15 @@
284 }; 275 };
285}; 276};
286 277
278&omap4_pmx_wkup {
279 led_wkgpio_pins: pinmux_leds_wkpins {
280 pinctrl-single,pins = <
281 0x1a (PIN_OUTPUT | MUX_MODE3) /* gpio_wk7 */
282 0x1c (PIN_OUTPUT | MUX_MODE3) /* gpio_wk8 */
283 >;
284 };
285};
286
287&i2c1 { 287&i2c1 {
288 pinctrl-names = "default"; 288 pinctrl-names = "default";
289 pinctrl-0 = <&i2c1_pins>; 289 pinctrl-0 = <&i2c1_pins>;
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index 5fc3f43c5a81..dbc81fb6ef03 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -300,12 +300,12 @@
300 wl12xx_pins: pinmux_wl12xx_pins { 300 wl12xx_pins: pinmux_wl12xx_pins {
301 pinctrl-single,pins = < 301 pinctrl-single,pins = <
302 0x3a (PIN_INPUT | MUX_MODE3) /* gpmc_ncs3.gpio_53 */ 302 0x3a (PIN_INPUT | MUX_MODE3) /* gpmc_ncs3.gpio_53 */
303 0x108 (PIN_OUTPUT | MUX_MODE3) /* sdmmc5_clk.sdmmc5_clk */ 303 0x108 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_clk.sdmmc5_clk */
304 0x10a (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_cmd.sdmmc5_cmd */ 304 0x10a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_cmd.sdmmc5_cmd */
305 0x10c (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat0.sdmmc5_dat0 */ 305 0x10c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat0.sdmmc5_dat0 */
306 0x10e (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat1.sdmmc5_dat1 */ 306 0x10e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat1.sdmmc5_dat1 */
307 0x110 (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat2.sdmmc5_dat2 */ 307 0x110 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat2.sdmmc5_dat2 */
308 0x112 (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat3.sdmmc5_dat3 */ 308 0x112 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat3.sdmmc5_dat3 */
309 >; 309 >;
310 }; 310 };
311}; 311};
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 6d09b8d42fdd..f936476c2753 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -245,14 +245,14 @@
245 245
246 mpu_periph_clk: mpu_periph_clk { 246 mpu_periph_clk: mpu_periph_clk {
247 #clock-cells = <0>; 247 #clock-cells = <0>;
248 compatible = "altr,socfpga-gate-clk"; 248 compatible = "altr,socfpga-perip-clk";
249 clocks = <&mpuclk>; 249 clocks = <&mpuclk>;
250 fixed-divider = <4>; 250 fixed-divider = <4>;
251 }; 251 };
252 252
253 mpu_l2_ram_clk: mpu_l2_ram_clk { 253 mpu_l2_ram_clk: mpu_l2_ram_clk {
254 #clock-cells = <0>; 254 #clock-cells = <0>;
255 compatible = "altr,socfpga-gate-clk"; 255 compatible = "altr,socfpga-perip-clk";
256 clocks = <&mpuclk>; 256 clocks = <&mpuclk>;
257 fixed-divider = <2>; 257 fixed-divider = <2>;
258 }; 258 };
@@ -266,8 +266,9 @@
266 266
267 l3_main_clk: l3_main_clk { 267 l3_main_clk: l3_main_clk {
268 #clock-cells = <0>; 268 #clock-cells = <0>;
269 compatible = "altr,socfpga-gate-clk"; 269 compatible = "altr,socfpga-perip-clk";
270 clocks = <&mainclk>; 270 clocks = <&mainclk>;
271 fixed-divider = <1>;
271 }; 272 };
272 273
273 l3_mp_clk: l3_mp_clk { 274 l3_mp_clk: l3_mp_clk {
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index c1751a64889a..7f5878c2784a 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -193,7 +193,10 @@
193 pio: pinctrl@01c20800 { 193 pio: pinctrl@01c20800 {
194 compatible = "allwinner,sun6i-a31-pinctrl"; 194 compatible = "allwinner,sun6i-a31-pinctrl";
195 reg = <0x01c20800 0x400>; 195 reg = <0x01c20800 0x400>;
196 interrupts = <0 11 1>, <0 15 1>, <0 16 1>, <0 17 1>; 196 interrupts = <0 11 4>,
197 <0 15 4>,
198 <0 16 4>,
199 <0 17 4>;
197 clocks = <&apb1_gates 5>; 200 clocks = <&apb1_gates 5>;
198 gpio-controller; 201 gpio-controller;
199 interrupt-controller; 202 interrupt-controller;
@@ -212,11 +215,11 @@
212 timer@01c20c00 { 215 timer@01c20c00 {
213 compatible = "allwinner,sun4i-timer"; 216 compatible = "allwinner,sun4i-timer";
214 reg = <0x01c20c00 0xa0>; 217 reg = <0x01c20c00 0xa0>;
215 interrupts = <0 18 1>, 218 interrupts = <0 18 4>,
216 <0 19 1>, 219 <0 19 4>,
217 <0 20 1>, 220 <0 20 4>,
218 <0 21 1>, 221 <0 21 4>,
219 <0 22 1>; 222 <0 22 4>;
220 clocks = <&osc24M>; 223 clocks = <&osc24M>;
221 }; 224 };
222 225
@@ -228,7 +231,7 @@
228 uart0: serial@01c28000 { 231 uart0: serial@01c28000 {
229 compatible = "snps,dw-apb-uart"; 232 compatible = "snps,dw-apb-uart";
230 reg = <0x01c28000 0x400>; 233 reg = <0x01c28000 0x400>;
231 interrupts = <0 0 1>; 234 interrupts = <0 0 4>;
232 reg-shift = <2>; 235 reg-shift = <2>;
233 reg-io-width = <4>; 236 reg-io-width = <4>;
234 clocks = <&apb2_gates 16>; 237 clocks = <&apb2_gates 16>;
@@ -238,7 +241,7 @@
238 uart1: serial@01c28400 { 241 uart1: serial@01c28400 {
239 compatible = "snps,dw-apb-uart"; 242 compatible = "snps,dw-apb-uart";
240 reg = <0x01c28400 0x400>; 243 reg = <0x01c28400 0x400>;
241 interrupts = <0 1 1>; 244 interrupts = <0 1 4>;
242 reg-shift = <2>; 245 reg-shift = <2>;
243 reg-io-width = <4>; 246 reg-io-width = <4>;
244 clocks = <&apb2_gates 17>; 247 clocks = <&apb2_gates 17>;
@@ -248,7 +251,7 @@
248 uart2: serial@01c28800 { 251 uart2: serial@01c28800 {
249 compatible = "snps,dw-apb-uart"; 252 compatible = "snps,dw-apb-uart";
250 reg = <0x01c28800 0x400>; 253 reg = <0x01c28800 0x400>;
251 interrupts = <0 2 1>; 254 interrupts = <0 2 4>;
252 reg-shift = <2>; 255 reg-shift = <2>;
253 reg-io-width = <4>; 256 reg-io-width = <4>;
254 clocks = <&apb2_gates 18>; 257 clocks = <&apb2_gates 18>;
@@ -258,7 +261,7 @@
258 uart3: serial@01c28c00 { 261 uart3: serial@01c28c00 {
259 compatible = "snps,dw-apb-uart"; 262 compatible = "snps,dw-apb-uart";
260 reg = <0x01c28c00 0x400>; 263 reg = <0x01c28c00 0x400>;
261 interrupts = <0 3 1>; 264 interrupts = <0 3 4>;
262 reg-shift = <2>; 265 reg-shift = <2>;
263 reg-io-width = <4>; 266 reg-io-width = <4>;
264 clocks = <&apb2_gates 19>; 267 clocks = <&apb2_gates 19>;
@@ -268,7 +271,7 @@
268 uart4: serial@01c29000 { 271 uart4: serial@01c29000 {
269 compatible = "snps,dw-apb-uart"; 272 compatible = "snps,dw-apb-uart";
270 reg = <0x01c29000 0x400>; 273 reg = <0x01c29000 0x400>;
271 interrupts = <0 4 1>; 274 interrupts = <0 4 4>;
272 reg-shift = <2>; 275 reg-shift = <2>;
273 reg-io-width = <4>; 276 reg-io-width = <4>;
274 clocks = <&apb2_gates 20>; 277 clocks = <&apb2_gates 20>;
@@ -278,7 +281,7 @@
278 uart5: serial@01c29400 { 281 uart5: serial@01c29400 {
279 compatible = "snps,dw-apb-uart"; 282 compatible = "snps,dw-apb-uart";
280 reg = <0x01c29400 0x400>; 283 reg = <0x01c29400 0x400>;
281 interrupts = <0 5 1>; 284 interrupts = <0 5 4>;
282 reg-shift = <2>; 285 reg-shift = <2>;
283 reg-io-width = <4>; 286 reg-io-width = <4>;
284 clocks = <&apb2_gates 21>; 287 clocks = <&apb2_gates 21>;
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index e46cfedde74c..367611a0730b 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -170,7 +170,7 @@
170 emac: ethernet@01c0b000 { 170 emac: ethernet@01c0b000 {
171 compatible = "allwinner,sun4i-emac"; 171 compatible = "allwinner,sun4i-emac";
172 reg = <0x01c0b000 0x1000>; 172 reg = <0x01c0b000 0x1000>;
173 interrupts = <0 55 1>; 173 interrupts = <0 55 4>;
174 clocks = <&ahb_gates 17>; 174 clocks = <&ahb_gates 17>;
175 status = "disabled"; 175 status = "disabled";
176 }; 176 };
@@ -186,7 +186,7 @@
186 pio: pinctrl@01c20800 { 186 pio: pinctrl@01c20800 {
187 compatible = "allwinner,sun7i-a20-pinctrl"; 187 compatible = "allwinner,sun7i-a20-pinctrl";
188 reg = <0x01c20800 0x400>; 188 reg = <0x01c20800 0x400>;
189 interrupts = <0 28 1>; 189 interrupts = <0 28 4>;
190 clocks = <&apb0_gates 5>; 190 clocks = <&apb0_gates 5>;
191 gpio-controller; 191 gpio-controller;
192 interrupt-controller; 192 interrupt-controller;
@@ -251,12 +251,12 @@
251 timer@01c20c00 { 251 timer@01c20c00 {
252 compatible = "allwinner,sun4i-timer"; 252 compatible = "allwinner,sun4i-timer";
253 reg = <0x01c20c00 0x90>; 253 reg = <0x01c20c00 0x90>;
254 interrupts = <0 22 1>, 254 interrupts = <0 22 4>,
255 <0 23 1>, 255 <0 23 4>,
256 <0 24 1>, 256 <0 24 4>,
257 <0 25 1>, 257 <0 25 4>,
258 <0 67 1>, 258 <0 67 4>,
259 <0 68 1>; 259 <0 68 4>;
260 clocks = <&osc24M>; 260 clocks = <&osc24M>;
261 }; 261 };
262 262
@@ -273,7 +273,7 @@
273 uart0: serial@01c28000 { 273 uart0: serial@01c28000 {
274 compatible = "snps,dw-apb-uart"; 274 compatible = "snps,dw-apb-uart";
275 reg = <0x01c28000 0x400>; 275 reg = <0x01c28000 0x400>;
276 interrupts = <0 1 1>; 276 interrupts = <0 1 4>;
277 reg-shift = <2>; 277 reg-shift = <2>;
278 reg-io-width = <4>; 278 reg-io-width = <4>;
279 clocks = <&apb1_gates 16>; 279 clocks = <&apb1_gates 16>;
@@ -283,7 +283,7 @@
283 uart1: serial@01c28400 { 283 uart1: serial@01c28400 {
284 compatible = "snps,dw-apb-uart"; 284 compatible = "snps,dw-apb-uart";
285 reg = <0x01c28400 0x400>; 285 reg = <0x01c28400 0x400>;
286 interrupts = <0 2 1>; 286 interrupts = <0 2 4>;
287 reg-shift = <2>; 287 reg-shift = <2>;
288 reg-io-width = <4>; 288 reg-io-width = <4>;
289 clocks = <&apb1_gates 17>; 289 clocks = <&apb1_gates 17>;
@@ -293,7 +293,7 @@
293 uart2: serial@01c28800 { 293 uart2: serial@01c28800 {
294 compatible = "snps,dw-apb-uart"; 294 compatible = "snps,dw-apb-uart";
295 reg = <0x01c28800 0x400>; 295 reg = <0x01c28800 0x400>;
296 interrupts = <0 3 1>; 296 interrupts = <0 3 4>;
297 reg-shift = <2>; 297 reg-shift = <2>;
298 reg-io-width = <4>; 298 reg-io-width = <4>;
299 clocks = <&apb1_gates 18>; 299 clocks = <&apb1_gates 18>;
@@ -303,7 +303,7 @@
303 uart3: serial@01c28c00 { 303 uart3: serial@01c28c00 {
304 compatible = "snps,dw-apb-uart"; 304 compatible = "snps,dw-apb-uart";
305 reg = <0x01c28c00 0x400>; 305 reg = <0x01c28c00 0x400>;
306 interrupts = <0 4 1>; 306 interrupts = <0 4 4>;
307 reg-shift = <2>; 307 reg-shift = <2>;
308 reg-io-width = <4>; 308 reg-io-width = <4>;
309 clocks = <&apb1_gates 19>; 309 clocks = <&apb1_gates 19>;
@@ -313,7 +313,7 @@
313 uart4: serial@01c29000 { 313 uart4: serial@01c29000 {
314 compatible = "snps,dw-apb-uart"; 314 compatible = "snps,dw-apb-uart";
315 reg = <0x01c29000 0x400>; 315 reg = <0x01c29000 0x400>;
316 interrupts = <0 17 1>; 316 interrupts = <0 17 4>;
317 reg-shift = <2>; 317 reg-shift = <2>;
318 reg-io-width = <4>; 318 reg-io-width = <4>;
319 clocks = <&apb1_gates 20>; 319 clocks = <&apb1_gates 20>;
@@ -323,7 +323,7 @@
323 uart5: serial@01c29400 { 323 uart5: serial@01c29400 {
324 compatible = "snps,dw-apb-uart"; 324 compatible = "snps,dw-apb-uart";
325 reg = <0x01c29400 0x400>; 325 reg = <0x01c29400 0x400>;
326 interrupts = <0 18 1>; 326 interrupts = <0 18 4>;
327 reg-shift = <2>; 327 reg-shift = <2>;
328 reg-io-width = <4>; 328 reg-io-width = <4>;
329 clocks = <&apb1_gates 21>; 329 clocks = <&apb1_gates 21>;
@@ -333,7 +333,7 @@
333 uart6: serial@01c29800 { 333 uart6: serial@01c29800 {
334 compatible = "snps,dw-apb-uart"; 334 compatible = "snps,dw-apb-uart";
335 reg = <0x01c29800 0x400>; 335 reg = <0x01c29800 0x400>;
336 interrupts = <0 19 1>; 336 interrupts = <0 19 4>;
337 reg-shift = <2>; 337 reg-shift = <2>;
338 reg-io-width = <4>; 338 reg-io-width = <4>;
339 clocks = <&apb1_gates 22>; 339 clocks = <&apb1_gates 22>;
@@ -343,7 +343,7 @@
343 uart7: serial@01c29c00 { 343 uart7: serial@01c29c00 {
344 compatible = "snps,dw-apb-uart"; 344 compatible = "snps,dw-apb-uart";
345 reg = <0x01c29c00 0x400>; 345 reg = <0x01c29c00 0x400>;
346 interrupts = <0 20 1>; 346 interrupts = <0 20 4>;
347 reg-shift = <2>; 347 reg-shift = <2>;
348 reg-io-width = <4>; 348 reg-io-width = <4>;
349 clocks = <&apb1_gates 23>; 349 clocks = <&apb1_gates 23>;
@@ -353,7 +353,7 @@
353 i2c0: i2c@01c2ac00 { 353 i2c0: i2c@01c2ac00 {
354 compatible = "allwinner,sun4i-i2c"; 354 compatible = "allwinner,sun4i-i2c";
355 reg = <0x01c2ac00 0x400>; 355 reg = <0x01c2ac00 0x400>;
356 interrupts = <0 7 1>; 356 interrupts = <0 7 4>;
357 clocks = <&apb1_gates 0>; 357 clocks = <&apb1_gates 0>;
358 clock-frequency = <100000>; 358 clock-frequency = <100000>;
359 status = "disabled"; 359 status = "disabled";
@@ -362,7 +362,7 @@
362 i2c1: i2c@01c2b000 { 362 i2c1: i2c@01c2b000 {
363 compatible = "allwinner,sun4i-i2c"; 363 compatible = "allwinner,sun4i-i2c";
364 reg = <0x01c2b000 0x400>; 364 reg = <0x01c2b000 0x400>;
365 interrupts = <0 8 1>; 365 interrupts = <0 8 4>;
366 clocks = <&apb1_gates 1>; 366 clocks = <&apb1_gates 1>;
367 clock-frequency = <100000>; 367 clock-frequency = <100000>;
368 status = "disabled"; 368 status = "disabled";
@@ -371,7 +371,7 @@
371 i2c2: i2c@01c2b400 { 371 i2c2: i2c@01c2b400 {
372 compatible = "allwinner,sun4i-i2c"; 372 compatible = "allwinner,sun4i-i2c";
373 reg = <0x01c2b400 0x400>; 373 reg = <0x01c2b400 0x400>;
374 interrupts = <0 9 1>; 374 interrupts = <0 9 4>;
375 clocks = <&apb1_gates 2>; 375 clocks = <&apb1_gates 2>;
376 clock-frequency = <100000>; 376 clock-frequency = <100000>;
377 status = "disabled"; 377 status = "disabled";
@@ -380,7 +380,7 @@
380 i2c3: i2c@01c2b800 { 380 i2c3: i2c@01c2b800 {
381 compatible = "allwinner,sun4i-i2c"; 381 compatible = "allwinner,sun4i-i2c";
382 reg = <0x01c2b800 0x400>; 382 reg = <0x01c2b800 0x400>;
383 interrupts = <0 88 1>; 383 interrupts = <0 88 4>;
384 clocks = <&apb1_gates 3>; 384 clocks = <&apb1_gates 3>;
385 clock-frequency = <100000>; 385 clock-frequency = <100000>;
386 status = "disabled"; 386 status = "disabled";
@@ -389,7 +389,7 @@
389 i2c4: i2c@01c2bc00 { 389 i2c4: i2c@01c2bc00 {
390 compatible = "allwinner,sun4i-i2c"; 390 compatible = "allwinner,sun4i-i2c";
391 reg = <0x01c2bc00 0x400>; 391 reg = <0x01c2bc00 0x400>;
392 interrupts = <0 89 1>; 392 interrupts = <0 89 4>;
393 clocks = <&apb1_gates 15>; 393 clocks = <&apb1_gates 15>;
394 clock-frequency = <100000>; 394 clock-frequency = <100000>;
395 status = "disabled"; 395 status = "disabled";
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index 8e1a0245907f..41bca32409fc 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -404,7 +404,7 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
404 BIT(slot)); 404 BIT(slot));
405 if (edma_cc[ctlr]->intr_data[channel].callback) 405 if (edma_cc[ctlr]->intr_data[channel].callback)
406 edma_cc[ctlr]->intr_data[channel].callback( 406 edma_cc[ctlr]->intr_data[channel].callback(
407 channel, DMA_COMPLETE, 407 channel, EDMA_DMA_COMPLETE,
408 edma_cc[ctlr]->intr_data[channel].data); 408 edma_cc[ctlr]->intr_data[channel].data);
409 } 409 }
410 } while (sh_ipr); 410 } while (sh_ipr);
@@ -459,7 +459,7 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
459 callback) { 459 callback) {
460 edma_cc[ctlr]->intr_data[k]. 460 edma_cc[ctlr]->intr_data[k].
461 callback(k, 461 callback(k,
462 DMA_CC_ERROR, 462 EDMA_DMA_CC_ERROR,
463 edma_cc[ctlr]->intr_data 463 edma_cc[ctlr]->intr_data
464 [k].data); 464 [k].data);
465 } 465 }
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 4a5903e04827..c1df4e9db140 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -69,6 +69,7 @@ CONFIG_KS8851=y
69CONFIG_SMSC911X=y 69CONFIG_SMSC911X=y
70CONFIG_STMMAC_ETH=y 70CONFIG_STMMAC_ETH=y
71CONFIG_MDIO_SUN4I=y 71CONFIG_MDIO_SUN4I=y
72CONFIG_TI_CPSW=y
72CONFIG_KEYBOARD_SPEAR=y 73CONFIG_KEYBOARD_SPEAR=y
73CONFIG_SERIO_AMBAKMI=y 74CONFIG_SERIO_AMBAKMI=y
74CONFIG_SERIAL_8250=y 75CONFIG_SERIAL_8250=y
@@ -133,12 +134,14 @@ CONFIG_USB_GPIO_VBUS=y
133CONFIG_USB_ISP1301=y 134CONFIG_USB_ISP1301=y
134CONFIG_USB_MXS_PHY=y 135CONFIG_USB_MXS_PHY=y
135CONFIG_MMC=y 136CONFIG_MMC=y
137CONFIG_MMC_BLOCK_MINORS=16
136CONFIG_MMC_ARMMMCI=y 138CONFIG_MMC_ARMMMCI=y
137CONFIG_MMC_SDHCI=y 139CONFIG_MMC_SDHCI=y
138CONFIG_MMC_SDHCI_PLTFM=y 140CONFIG_MMC_SDHCI_PLTFM=y
139CONFIG_MMC_SDHCI_ESDHC_IMX=y 141CONFIG_MMC_SDHCI_ESDHC_IMX=y
140CONFIG_MMC_SDHCI_TEGRA=y 142CONFIG_MMC_SDHCI_TEGRA=y
141CONFIG_MMC_SDHCI_SPEAR=y 143CONFIG_MMC_SDHCI_SPEAR=y
144CONFIG_MMC_SDHCI_BCM_KONA=y
142CONFIG_MMC_OMAP=y 145CONFIG_MMC_OMAP=y
143CONFIG_MMC_OMAP_HS=y 146CONFIG_MMC_OMAP_HS=y
144CONFIG_EDAC=y 147CONFIG_EDAC=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 98a50c309b90..bfa80a11e8c7 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -173,6 +173,7 @@ CONFIG_MFD_PALMAS=y
173CONFIG_MFD_TPS65217=y 173CONFIG_MFD_TPS65217=y
174CONFIG_MFD_TPS65910=y 174CONFIG_MFD_TPS65910=y
175CONFIG_TWL6040_CORE=y 175CONFIG_TWL6040_CORE=y
176CONFIG_REGULATOR_FIXED_VOLTAGE=y
176CONFIG_REGULATOR_PALMAS=y 177CONFIG_REGULATOR_PALMAS=y
177CONFIG_REGULATOR_TPS65023=y 178CONFIG_REGULATOR_TPS65023=y
178CONFIG_REGULATOR_TPS6507X=y 179CONFIG_REGULATOR_TPS6507X=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index d57a85badb5e..3e2259b60236 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -12,6 +12,9 @@ CONFIG_NET=y
12CONFIG_PACKET=y 12CONFIG_PACKET=y
13CONFIG_UNIX=y 13CONFIG_UNIX=y
14CONFIG_INET=y 14CONFIG_INET=y
15CONFIG_IP_PNP=y
16CONFIG_IP_PNP_DHCP=y
17CONFIG_IP_PNP_BOOTP=y
15# CONFIG_INET_XFRM_MODE_TRANSPORT is not set 18# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
16# CONFIG_INET_XFRM_MODE_TUNNEL is not set 19# CONFIG_INET_XFRM_MODE_TUNNEL is not set
17# CONFIG_INET_XFRM_MODE_BEET is not set 20# CONFIG_INET_XFRM_MODE_BEET is not set
@@ -58,4 +61,8 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=y
58CONFIG_LEDS_TRIGGER_DEFAULT_ON=y 61CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
59CONFIG_COMMON_CLK_DEBUG=y 62CONFIG_COMMON_CLK_DEBUG=y
60# CONFIG_IOMMU_SUPPORT is not set 63# CONFIG_IOMMU_SUPPORT is not set
64CONFIG_TMPFS=y
65CONFIG_NFS_FS=y
66CONFIG_ROOT_NFS=y
61CONFIG_NLS=y 67CONFIG_NLS=y
68CONFIG_PRINTK_TIME=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index ac632cc38f24..c6ebc184bf68 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -22,6 +22,7 @@ CONFIG_CMDLINE="root=/dev/ram0 console=ttyAMA2,115200n8"
22CONFIG_CPU_FREQ=y 22CONFIG_CPU_FREQ=y
23CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 23CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
24CONFIG_CPU_IDLE=y 24CONFIG_CPU_IDLE=y
25CONFIG_ARM_U8500_CPUIDLE=y
25CONFIG_VFP=y 26CONFIG_VFP=y
26CONFIG_NEON=y 27CONFIG_NEON=y
27CONFIG_PM_RUNTIME=y 28CONFIG_PM_RUNTIME=y
@@ -109,6 +110,8 @@ CONFIG_EXT2_FS_SECURITY=y
109CONFIG_EXT3_FS=y 110CONFIG_EXT3_FS=y
110CONFIG_EXT4_FS=y 111CONFIG_EXT4_FS=y
111CONFIG_VFAT_FS=y 112CONFIG_VFAT_FS=y
113CONFIG_DEVTMPFS=y
114CONFIG_DEVTMPFS_MOUNT=y
112CONFIG_TMPFS=y 115CONFIG_TMPFS=y
113CONFIG_TMPFS_POSIX_ACL=y 116CONFIG_TMPFS_POSIX_ACL=y
114# CONFIG_MISC_FILESYSTEMS is not set 117# CONFIG_MISC_FILESYSTEMS is not set
diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h
index 9b28f1243bdc..240b29ef17db 100644
--- a/arch/arm/include/asm/hardware/iop3xx-adma.h
+++ b/arch/arm/include/asm/hardware/iop3xx-adma.h
@@ -393,36 +393,6 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
393 return slot_cnt; 393 return slot_cnt;
394} 394}
395 395
396static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
397{
398 return 0;
399}
400
401static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
402 struct iop_adma_chan *chan)
403{
404 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
405
406 switch (chan->device->id) {
407 case DMA0_ID:
408 case DMA1_ID:
409 return hw_desc.dma->dest_addr;
410 case AAU_ID:
411 return hw_desc.aau->dest_addr;
412 default:
413 BUG();
414 }
415 return 0;
416}
417
418
419static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
420 struct iop_adma_chan *chan)
421{
422 BUG();
423 return 0;
424}
425
426static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, 396static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
427 struct iop_adma_chan *chan) 397 struct iop_adma_chan *chan)
428{ 398{
diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h
index 122f86d8c991..250760e08103 100644
--- a/arch/arm/include/asm/hardware/iop_adma.h
+++ b/arch/arm/include/asm/hardware/iop_adma.h
@@ -82,8 +82,6 @@ struct iop_adma_chan {
82 * @slot_cnt: total slots used in an transaction (group of operations) 82 * @slot_cnt: total slots used in an transaction (group of operations)
83 * @slots_per_op: number of slots per operation 83 * @slots_per_op: number of slots per operation
84 * @idx: pool index 84 * @idx: pool index
85 * @unmap_src_cnt: number of xor sources
86 * @unmap_len: transaction bytecount
87 * @tx_list: list of descriptors that are associated with one operation 85 * @tx_list: list of descriptors that are associated with one operation
88 * @async_tx: support for the async_tx api 86 * @async_tx: support for the async_tx api
89 * @group_list: list of slots that make up a multi-descriptor transaction 87 * @group_list: list of slots that make up a multi-descriptor transaction
@@ -99,8 +97,6 @@ struct iop_adma_desc_slot {
99 u16 slot_cnt; 97 u16 slot_cnt;
100 u16 slots_per_op; 98 u16 slots_per_op;
101 u16 idx; 99 u16 idx;
102 u16 unmap_src_cnt;
103 size_t unmap_len;
104 struct list_head tx_list; 100 struct list_head tx_list;
105 struct dma_async_tx_descriptor async_tx; 101 struct dma_async_tx_descriptor async_tx;
106 union { 102 union {
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 4dd21457ef9d..6976b03e5213 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -100,23 +100,19 @@
100#define TASK_UNMAPPED_BASE UL(0x00000000) 100#define TASK_UNMAPPED_BASE UL(0x00000000)
101#endif 101#endif
102 102
103#ifndef PHYS_OFFSET
104#define PHYS_OFFSET UL(CONFIG_DRAM_BASE)
105#endif
106
107#ifndef END_MEM 103#ifndef END_MEM
108#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) 104#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
109#endif 105#endif
110 106
111#ifndef PAGE_OFFSET 107#ifndef PAGE_OFFSET
112#define PAGE_OFFSET (PHYS_OFFSET) 108#define PAGE_OFFSET PLAT_PHYS_OFFSET
113#endif 109#endif
114 110
115/* 111/*
116 * The module can be at any place in ram in nommu mode. 112 * The module can be at any place in ram in nommu mode.
117 */ 113 */
118#define MODULES_END (END_MEM) 114#define MODULES_END (END_MEM)
119#define MODULES_VADDR (PHYS_OFFSET) 115#define MODULES_VADDR PAGE_OFFSET
120 116
121#define XIP_VIRT_ADDR(physaddr) (physaddr) 117#define XIP_VIRT_ADDR(physaddr) (physaddr)
122 118
@@ -157,6 +153,16 @@
157#endif 153#endif
158#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1) 154#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1)
159 155
156/*
157 * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
158 * memory. This is used for XIP and NoMMU kernels, or by kernels which
159 * have their own mach/memory.h. Assembly code must always use
160 * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
161 */
162#ifndef PLAT_PHYS_OFFSET
163#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
164#endif
165
160#ifndef __ASSEMBLY__ 166#ifndef __ASSEMBLY__
161 167
162/* 168/*
@@ -226,12 +232,21 @@ static inline phys_addr_t __virt_to_phys(unsigned long x)
226static inline unsigned long __phys_to_virt(phys_addr_t x) 232static inline unsigned long __phys_to_virt(phys_addr_t x)
227{ 233{
228 unsigned long t; 234 unsigned long t;
229 __pv_stub(x, t, "sub", __PV_BITS_31_24); 235
236 /*
237 * 'unsigned long' cast discard upper word when
238 * phys_addr_t is 64 bit, and makes sure that inline
239 * assembler expression receives 32 bit argument
240 * in place where 'r' 32 bit operand is expected.
241 */
242 __pv_stub((unsigned long) x, t, "sub", __PV_BITS_31_24);
230 return t; 243 return t;
231} 244}
232 245
233#else 246#else
234 247
248#define PHYS_OFFSET PLAT_PHYS_OFFSET
249
235static inline phys_addr_t __virt_to_phys(unsigned long x) 250static inline phys_addr_t __virt_to_phys(unsigned long x)
236{ 251{
237 return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET; 252 return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
@@ -244,17 +259,6 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
244 259
245#endif 260#endif
246#endif 261#endif
247#endif /* __ASSEMBLY__ */
248
249#ifndef PHYS_OFFSET
250#ifdef PLAT_PHYS_OFFSET
251#define PHYS_OFFSET PLAT_PHYS_OFFSET
252#else
253#define PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
254#endif
255#endif
256
257#ifndef __ASSEMBLY__
258 262
259/* 263/*
260 * PFNs are used to describe any physical page; this means 264 * PFNs are used to describe any physical page; this means
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index be956dbf6bae..1571d126e9dd 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -61,7 +61,7 @@ extern void __pgd_error(const char *file, int line, pgd_t);
61 * mapping to be mapped at. This is particularly important for 61 * mapping to be mapped at. This is particularly important for
62 * non-high vector CPUs. 62 * non-high vector CPUs.
63 */ 63 */
64#define FIRST_USER_ADDRESS PAGE_SIZE 64#define FIRST_USER_ADDRESS (PAGE_SIZE * 2)
65 65
66/* 66/*
67 * Use TASK_SIZE as the ceiling argument for free_pgtables() and 67 * Use TASK_SIZE as the ceiling argument for free_pgtables() and
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index df5e13d64f2c..71a06b293489 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -141,12 +141,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
141#endif 141#endif
142 142
143/* 143/*
144 * We use bit 30 of the preempt_count to indicate that kernel
145 * preemption is occurring. See <asm/hardirq.h>.
146 */
147#define PREEMPT_ACTIVE 0x40000000
148
149/*
150 * thread information flags: 144 * thread information flags:
151 * TIF_SYSCALL_TRACE - syscall trace active 145 * TIF_SYSCALL_TRACE - syscall trace active
152 * TIF_SYSCAL_AUDIT - syscall auditing active 146 * TIF_SYSCAL_AUDIT - syscall auditing active
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 14235ba64a90..716249cc2ee1 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -68,7 +68,7 @@ ENTRY(stext)
68 68
69#ifdef CONFIG_ARM_MPU 69#ifdef CONFIG_ARM_MPU
70 /* Calculate the size of a region covering just the kernel */ 70 /* Calculate the size of a region covering just the kernel */
71 ldr r5, =PHYS_OFFSET @ Region start: PHYS_OFFSET 71 ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET
72 ldr r6, =(_end) @ Cover whole kernel 72 ldr r6, =(_end) @ Cover whole kernel
73 sub r6, r6, r5 @ Minimum size of region to map 73 sub r6, r6, r5 @ Minimum size of region to map
74 clz r6, r6 @ Region size must be 2^N... 74 clz r6, r6 @ Region size must be 2^N...
@@ -213,7 +213,7 @@ ENTRY(__setup_mpu)
213 set_region_nr r0, #MPU_RAM_REGION 213 set_region_nr r0, #MPU_RAM_REGION
214 isb 214 isb
215 /* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */ 215 /* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
216 ldr r0, =PHYS_OFFSET @ RAM starts at PHYS_OFFSET 216 ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET
217 ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL) 217 ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
218 218
219 setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled 219 setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 7801866e626a..32f317e5828a 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -110,7 +110,7 @@ ENTRY(stext)
110 sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET) 110 sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET)
111 add r8, r8, r4 @ PHYS_OFFSET 111 add r8, r8, r4 @ PHYS_OFFSET
112#else 112#else
113 ldr r8, =PHYS_OFFSET @ always constant in this case 113 ldr r8, =PLAT_PHYS_OFFSET @ always constant in this case
114#endif 114#endif
115 115
116 /* 116 /*
@@ -508,6 +508,7 @@ __fixup_smp:
508 teq r0, #0x0 @ '0' on actual UP A9 hardware 508 teq r0, #0x0 @ '0' on actual UP A9 hardware
509 beq __fixup_smp_on_up @ So its an A9 UP 509 beq __fixup_smp_on_up @ So its an A9 UP
510 ldr r0, [r0, #4] @ read SCU Config 510 ldr r0, [r0, #4] @ read SCU Config
511ARM_BE8(rev r0, r0) @ byteswap if big endian
511 and r0, r0, #0x3 @ number of CPUs 512 and r0, r0, #0x3 @ number of CPUs
512 teq r0, #0x0 @ is 1? 513 teq r0, #0x0 @ is 1?
513 movne pc, lr 514 movne pc, lr
@@ -644,7 +645,11 @@ ARM_BE8(rev16 ip, ip)
644 bcc 1b 645 bcc 1b
645 bx lr 646 bx lr
646#else 647#else
648#ifdef CONFIG_CPU_ENDIAN_BE8
649 moveq r0, #0x00004000 @ set bit 22, mov to mvn instruction
650#else
647 moveq r0, #0x400000 @ set bit 22, mov to mvn instruction 651 moveq r0, #0x400000 @ set bit 22, mov to mvn instruction
652#endif
648 b 2f 653 b 2f
6491: ldr ip, [r7, r3] 6541: ldr ip, [r7, r3]
650#ifdef CONFIG_CPU_ENDIAN_BE8 655#ifdef CONFIG_CPU_ENDIAN_BE8
@@ -653,7 +658,7 @@ ARM_BE8(rev16 ip, ip)
653 tst ip, #0x000f0000 @ check the rotation field 658 tst ip, #0x000f0000 @ check the rotation field
654 orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24 659 orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24
655 biceq ip, ip, #0x00004000 @ clear bit 22 660 biceq ip, ip, #0x00004000 @ clear bit 22
656 orreq ip, ip, r0, lsl #24 @ mask in offset bits 7-0 661 orreq ip, ip, r0 @ mask in offset bits 7-0
657#else 662#else
658 bic ip, ip, #0x000000ff 663 bic ip, ip, #0x000000ff
659 tst ip, #0xf00 @ check the rotation field 664 tst ip, #0xf00 @ check the rotation field
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 57221e349a7c..f0d180d8b29f 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -14,11 +14,12 @@
14#include <asm/pgalloc.h> 14#include <asm/pgalloc.h>
15#include <asm/mmu_context.h> 15#include <asm/mmu_context.h>
16#include <asm/cacheflush.h> 16#include <asm/cacheflush.h>
17#include <asm/fncpy.h>
17#include <asm/mach-types.h> 18#include <asm/mach-types.h>
18#include <asm/smp_plat.h> 19#include <asm/smp_plat.h>
19#include <asm/system_misc.h> 20#include <asm/system_misc.h>
20 21
21extern const unsigned char relocate_new_kernel[]; 22extern void relocate_new_kernel(void);
22extern const unsigned int relocate_new_kernel_size; 23extern const unsigned int relocate_new_kernel_size;
23 24
24extern unsigned long kexec_start_address; 25extern unsigned long kexec_start_address;
@@ -142,6 +143,8 @@ void machine_kexec(struct kimage *image)
142{ 143{
143 unsigned long page_list; 144 unsigned long page_list;
144 unsigned long reboot_code_buffer_phys; 145 unsigned long reboot_code_buffer_phys;
146 unsigned long reboot_entry = (unsigned long)relocate_new_kernel;
147 unsigned long reboot_entry_phys;
145 void *reboot_code_buffer; 148 void *reboot_code_buffer;
146 149
147 /* 150 /*
@@ -168,16 +171,16 @@ void machine_kexec(struct kimage *image)
168 171
169 172
170 /* copy our kernel relocation code to the control code page */ 173 /* copy our kernel relocation code to the control code page */
171 memcpy(reboot_code_buffer, 174 reboot_entry = fncpy(reboot_code_buffer,
172 relocate_new_kernel, relocate_new_kernel_size); 175 reboot_entry,
176 relocate_new_kernel_size);
177 reboot_entry_phys = (unsigned long)reboot_entry +
178 (reboot_code_buffer_phys - (unsigned long)reboot_code_buffer);
173 179
174
175 flush_icache_range((unsigned long) reboot_code_buffer,
176 (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
177 printk(KERN_INFO "Bye!\n"); 180 printk(KERN_INFO "Bye!\n");
178 181
179 if (kexec_reinit) 182 if (kexec_reinit)
180 kexec_reinit(); 183 kexec_reinit();
181 184
182 soft_restart(reboot_code_buffer_phys); 185 soft_restart(reboot_entry_phys);
183} 186}
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 94f6b05f9e24..92f7b15dd221 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -404,6 +404,7 @@ EXPORT_SYMBOL(dump_fpu);
404unsigned long get_wchan(struct task_struct *p) 404unsigned long get_wchan(struct task_struct *p)
405{ 405{
406 struct stackframe frame; 406 struct stackframe frame;
407 unsigned long stack_page;
407 int count = 0; 408 int count = 0;
408 if (!p || p == current || p->state == TASK_RUNNING) 409 if (!p || p == current || p->state == TASK_RUNNING)
409 return 0; 410 return 0;
@@ -412,9 +413,11 @@ unsigned long get_wchan(struct task_struct *p)
412 frame.sp = thread_saved_sp(p); 413 frame.sp = thread_saved_sp(p);
413 frame.lr = 0; /* recovered from the stack */ 414 frame.lr = 0; /* recovered from the stack */
414 frame.pc = thread_saved_pc(p); 415 frame.pc = thread_saved_pc(p);
416 stack_page = (unsigned long)task_stack_page(p);
415 do { 417 do {
416 int ret = unwind_frame(&frame); 418 if (frame.sp < stack_page ||
417 if (ret < 0) 419 frame.sp >= stack_page + THREAD_SIZE ||
420 unwind_frame(&frame) < 0)
418 return 0; 421 return 0;
419 if (!in_sched_functions(frame.pc)) 422 if (!in_sched_functions(frame.pc))
420 return frame.pc; 423 return frame.pc;
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index d0cdedf4864d..95858966d84e 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -2,10 +2,12 @@
2 * relocate_kernel.S - put the kernel image in place to boot 2 * relocate_kernel.S - put the kernel image in place to boot
3 */ 3 */
4 4
5#include <linux/linkage.h>
5#include <asm/kexec.h> 6#include <asm/kexec.h>
6 7
7 .globl relocate_new_kernel 8 .align 3 /* not needed for this code, but keeps fncpy() happy */
8relocate_new_kernel: 9
10ENTRY(relocate_new_kernel)
9 11
10 ldr r0,kexec_indirection_page 12 ldr r0,kexec_indirection_page
11 ldr r1,kexec_start_address 13 ldr r1,kexec_start_address
@@ -79,6 +81,8 @@ kexec_mach_type:
79kexec_boot_atags: 81kexec_boot_atags:
80 .long 0x0 82 .long 0x0
81 83
84ENDPROC(relocate_new_kernel)
85
82relocate_new_kernel_end: 86relocate_new_kernel_end:
83 87
84 .globl relocate_new_kernel_size 88 .globl relocate_new_kernel_size
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 6a1b8a81b1ae..987a7f5bce5f 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -873,8 +873,6 @@ void __init setup_arch(char **cmdline_p)
873 machine_desc = mdesc; 873 machine_desc = mdesc;
874 machine_name = mdesc->name; 874 machine_name = mdesc->name;
875 875
876 setup_dma_zone(mdesc);
877
878 if (mdesc->reboot_mode != REBOOT_HARD) 876 if (mdesc->reboot_mode != REBOOT_HARD)
879 reboot_mode = mdesc->reboot_mode; 877 reboot_mode = mdesc->reboot_mode;
880 878
@@ -892,6 +890,7 @@ void __init setup_arch(char **cmdline_p)
892 sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); 890 sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
893 891
894 early_paging_init(mdesc, lookup_processor_type(read_cpuid_id())); 892 early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
893 setup_dma_zone(mdesc);
895 sanity_check_meminfo(); 894 sanity_check_meminfo();
896 arm_memblock_init(&meminfo, mdesc); 895 arm_memblock_init(&meminfo, mdesc);
897 896
diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S
index 3c5d0f2170fd..b84d0cb13682 100644
--- a/arch/arm/kernel/sigreturn_codes.S
+++ b/arch/arm/kernel/sigreturn_codes.S
@@ -30,6 +30,27 @@
30 * snippets. 30 * snippets.
31 */ 31 */
32 32
33/*
34 * In CPU_THUMBONLY case kernel arm opcodes are not allowed.
35 * Note in this case codes skips those instructions but it uses .org
36 * directive to keep correct layout of sigreturn_codes array.
37 */
38#ifndef CONFIG_CPU_THUMBONLY
39#define ARM_OK(code...) code
40#else
41#define ARM_OK(code...)
42#endif
43
44 .macro arm_slot n
45 .org sigreturn_codes + 12 * (\n)
46ARM_OK( .arm )
47 .endm
48
49 .macro thumb_slot n
50 .org sigreturn_codes + 12 * (\n) + 8
51 .thumb
52 .endm
53
33#if __LINUX_ARM_ARCH__ <= 4 54#if __LINUX_ARM_ARCH__ <= 4
34 /* 55 /*
35 * Note we manually set minimally required arch that supports 56 * Note we manually set minimally required arch that supports
@@ -45,26 +66,27 @@
45 .global sigreturn_codes 66 .global sigreturn_codes
46 .type sigreturn_codes, #object 67 .type sigreturn_codes, #object
47 68
48 .arm 69 .align
49 70
50sigreturn_codes: 71sigreturn_codes:
51 72
52 /* ARM sigreturn syscall code snippet */ 73 /* ARM sigreturn syscall code snippet */
53 mov r7, #(__NR_sigreturn - __NR_SYSCALL_BASE) 74 arm_slot 0
54 swi #(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE) 75ARM_OK( mov r7, #(__NR_sigreturn - __NR_SYSCALL_BASE) )
76ARM_OK( swi #(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE) )
55 77
56 /* Thumb sigreturn syscall code snippet */ 78 /* Thumb sigreturn syscall code snippet */
57 .thumb 79 thumb_slot 0
58 movs r7, #(__NR_sigreturn - __NR_SYSCALL_BASE) 80 movs r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
59 swi #0 81 swi #0
60 82
61 /* ARM sigreturn_rt syscall code snippet */ 83 /* ARM sigreturn_rt syscall code snippet */
62 .arm 84 arm_slot 1
63 mov r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE) 85ARM_OK( mov r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE) )
64 swi #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE) 86ARM_OK( swi #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE) )
65 87
66 /* Thumb sigreturn_rt syscall code snippet */ 88 /* Thumb sigreturn_rt syscall code snippet */
67 .thumb 89 thumb_slot 1
68 movs r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE) 90 movs r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
69 swi #0 91 swi #0
70 92
@@ -74,7 +96,7 @@ sigreturn_codes:
74 * it is thumb case or not, so we need additional 96 * it is thumb case or not, so we need additional
75 * word after real last entry. 97 * word after real last entry.
76 */ 98 */
77 .arm 99 arm_slot 2
78 .space 4 100 .space 4
79 101
80 .size sigreturn_codes, . - sigreturn_codes 102 .size sigreturn_codes, . - sigreturn_codes
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 00f79e59985b..af4e8c8a5422 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -31,7 +31,7 @@ int notrace unwind_frame(struct stackframe *frame)
31 high = ALIGN(low, THREAD_SIZE); 31 high = ALIGN(low, THREAD_SIZE);
32 32
33 /* check current frame pointer is within bounds */ 33 /* check current frame pointer is within bounds */
34 if (fp < (low + 12) || fp + 4 >= high) 34 if (fp < low + 12 || fp > high - 4)
35 return -EINVAL; 35 return -EINVAL;
36 36
37 /* restore the registers from the stack frame */ 37 /* restore the registers from the stack frame */
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 6125f259b7b5..7940241f0576 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -509,9 +509,10 @@ static inline int
509__do_cache_op(unsigned long start, unsigned long end) 509__do_cache_op(unsigned long start, unsigned long end)
510{ 510{
511 int ret; 511 int ret;
512 unsigned long chunk = PAGE_SIZE;
513 512
514 do { 513 do {
514 unsigned long chunk = min(PAGE_SIZE, end - start);
515
515 if (signal_pending(current)) { 516 if (signal_pending(current)) {
516 struct thread_info *ti = current_thread_info(); 517 struct thread_info *ti = current_thread_info();
517 518
@@ -856,7 +857,7 @@ static void __init kuser_init(void *vectors)
856 memcpy(vectors + 0xfe0, vectors + 0xfe8, 4); 857 memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
857} 858}
858#else 859#else
859static void __init kuser_init(void *vectors) 860static inline void __init kuser_init(void *vectors)
860{ 861{
861} 862}
862#endif 863#endif
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 371958370de4..580906989db1 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -334,6 +334,17 @@ out:
334 return err; 334 return err;
335} 335}
336 336
337static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
338{
339 if (!is_vmalloc_addr(kaddr)) {
340 BUG_ON(!virt_addr_valid(kaddr));
341 return __pa(kaddr);
342 } else {
343 return page_to_phys(vmalloc_to_page(kaddr)) +
344 offset_in_page(kaddr);
345 }
346}
347
337/** 348/**
338 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode 349 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
339 * @from: The virtual kernel start address of the range 350 * @from: The virtual kernel start address of the range
@@ -345,16 +356,27 @@ out:
345 */ 356 */
346int create_hyp_mappings(void *from, void *to) 357int create_hyp_mappings(void *from, void *to)
347{ 358{
348 unsigned long phys_addr = virt_to_phys(from); 359 phys_addr_t phys_addr;
360 unsigned long virt_addr;
349 unsigned long start = KERN_TO_HYP((unsigned long)from); 361 unsigned long start = KERN_TO_HYP((unsigned long)from);
350 unsigned long end = KERN_TO_HYP((unsigned long)to); 362 unsigned long end = KERN_TO_HYP((unsigned long)to);
351 363
352 /* Check for a valid kernel memory mapping */ 364 start = start & PAGE_MASK;
353 if (!virt_addr_valid(from) || !virt_addr_valid(to - 1)) 365 end = PAGE_ALIGN(end);
354 return -EINVAL;
355 366
356 return __create_hyp_mappings(hyp_pgd, start, end, 367 for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
357 __phys_to_pfn(phys_addr), PAGE_HYP); 368 int err;
369
370 phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
371 err = __create_hyp_mappings(hyp_pgd, virt_addr,
372 virt_addr + PAGE_SIZE,
373 __phys_to_pfn(phys_addr),
374 PAGE_HYP);
375 if (err)
376 return err;
377 }
378
379 return 0;
358} 380}
359 381
360/** 382/**
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index e0c68d5bb7dc..52886b89706c 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -10,7 +10,7 @@ UNWIND( .fnstart )
10 and r3, r0, #31 @ Get bit offset 10 and r3, r0, #31 @ Get bit offset
11 mov r0, r0, lsr #5 11 mov r0, r0, lsr #5
12 add r1, r1, r0, lsl #2 @ Get word offset 12 add r1, r1, r0, lsl #2 @ Get word offset
13#if __LINUX_ARM_ARCH__ >= 7 13#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
14 .arch_extension mp 14 .arch_extension mp
15 ALT_SMP(W(pldw) [r1]) 15 ALT_SMP(W(pldw) [r1])
16 ALT_UP(W(nop)) 16 ALT_UP(W(nop))
diff --git a/arch/arm/lib/delay-loop.S b/arch/arm/lib/delay-loop.S
index 36b668d8e121..bc1033b897b4 100644
--- a/arch/arm/lib/delay-loop.S
+++ b/arch/arm/lib/delay-loop.S
@@ -40,6 +40,7 @@ ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0x7fffff06
40/* 40/*
41 * loops = r0 * HZ * loops_per_jiffy / 1000000 41 * loops = r0 * HZ * loops_per_jiffy / 1000000
42 */ 42 */
43 .align 3
43 44
44@ Delay routine 45@ Delay routine
45ENTRY(__loop_delay) 46ENTRY(__loop_delay)
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index f607deb40f4d..bc7b363a3083 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -174,7 +174,6 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev)
174static struct clock_event_device clkevt = { 174static struct clock_event_device clkevt = {
175 .name = "at91_tick", 175 .name = "at91_tick",
176 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, 176 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
177 .shift = 32,
178 .rating = 150, 177 .rating = 150,
179 .set_next_event = clkevt32k_next_event, 178 .set_next_event = clkevt32k_next_event,
180 .set_mode = clkevt32k_mode, 179 .set_mode = clkevt32k_mode,
@@ -265,11 +264,9 @@ void __init at91rm9200_timer_init(void)
265 at91_st_write(AT91_ST_RTMR, 1); 264 at91_st_write(AT91_ST_RTMR, 1);
266 265
267 /* Setup timer clockevent, with minimum of two ticks (important!!) */ 266 /* Setup timer clockevent, with minimum of two ticks (important!!) */
268 clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift);
269 clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt);
270 clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1;
271 clkevt.cpumask = cpumask_of(0); 267 clkevt.cpumask = cpumask_of(0);
272 clockevents_register_device(&clkevt); 268 clockevents_config_and_register(&clkevt, AT91_SLOW_CLOCK,
269 2, AT91_ST_ALMV);
273 270
274 /* register clocksource */ 271 /* register clocksource */
275 clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK); 272 clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK);
diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
index 3ed190ce062b..c5101dcb4fb0 100644
--- a/arch/arm/mach-at91/pm.h
+++ b/arch/arm/mach-at91/pm.h
@@ -16,7 +16,11 @@
16#include <mach/at91_ramc.h> 16#include <mach/at91_ramc.h>
17#include <mach/at91rm9200_sdramc.h> 17#include <mach/at91rm9200_sdramc.h>
18 18
19#ifdef CONFIG_PM
19extern void at91_pm_set_standby(void (*at91_standby)(void)); 20extern void at91_pm_set_standby(void (*at91_standby)(void));
21#else
22static inline void at91_pm_set_standby(void (*at91_standby)(void)) { }
23#endif
20 24
21/* 25/*
22 * The AT91RM9200 goes into self-refresh mode with this command, and will 26 * The AT91RM9200 goes into self-refresh mode with this command, and will
diff --git a/arch/arm/mach-at91/sama5d3.c b/arch/arm/mach-at91/sama5d3.c
index 3ea86428ee09..a28873fe3049 100644
--- a/arch/arm/mach-at91/sama5d3.c
+++ b/arch/arm/mach-at91/sama5d3.c
@@ -95,19 +95,19 @@ static struct clk twi0_clk = {
95 .name = "twi0_clk", 95 .name = "twi0_clk",
96 .pid = SAMA5D3_ID_TWI0, 96 .pid = SAMA5D3_ID_TWI0,
97 .type = CLK_TYPE_PERIPHERAL, 97 .type = CLK_TYPE_PERIPHERAL,
98 .div = AT91_PMC_PCR_DIV2, 98 .div = AT91_PMC_PCR_DIV8,
99}; 99};
100static struct clk twi1_clk = { 100static struct clk twi1_clk = {
101 .name = "twi1_clk", 101 .name = "twi1_clk",
102 .pid = SAMA5D3_ID_TWI1, 102 .pid = SAMA5D3_ID_TWI1,
103 .type = CLK_TYPE_PERIPHERAL, 103 .type = CLK_TYPE_PERIPHERAL,
104 .div = AT91_PMC_PCR_DIV2, 104 .div = AT91_PMC_PCR_DIV8,
105}; 105};
106static struct clk twi2_clk = { 106static struct clk twi2_clk = {
107 .name = "twi2_clk", 107 .name = "twi2_clk",
108 .pid = SAMA5D3_ID_TWI2, 108 .pid = SAMA5D3_ID_TWI2,
109 .type = CLK_TYPE_PERIPHERAL, 109 .type = CLK_TYPE_PERIPHERAL,
110 .div = AT91_PMC_PCR_DIV2, 110 .div = AT91_PMC_PCR_DIV8,
111}; 111};
112static struct clk mmc0_clk = { 112static struct clk mmc0_clk = {
113 .name = "mci0_clk", 113 .name = "mci0_clk",
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index c46eccbbd512..78829c513fdc 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -487,7 +487,7 @@ int __init da8xx_register_emac(void)
487 487
488static struct resource da830_mcasp1_resources[] = { 488static struct resource da830_mcasp1_resources[] = {
489 { 489 {
490 .name = "mcasp1", 490 .name = "mpu",
491 .start = DAVINCI_DA830_MCASP1_REG_BASE, 491 .start = DAVINCI_DA830_MCASP1_REG_BASE,
492 .end = DAVINCI_DA830_MCASP1_REG_BASE + (SZ_1K * 12) - 1, 492 .end = DAVINCI_DA830_MCASP1_REG_BASE + (SZ_1K * 12) - 1,
493 .flags = IORESOURCE_MEM, 493 .flags = IORESOURCE_MEM,
@@ -515,7 +515,7 @@ static struct platform_device da830_mcasp1_device = {
515 515
516static struct resource da850_mcasp_resources[] = { 516static struct resource da850_mcasp_resources[] = {
517 { 517 {
518 .name = "mcasp", 518 .name = "mpu",
519 .start = DAVINCI_DA8XX_MCASP0_REG_BASE, 519 .start = DAVINCI_DA8XX_MCASP0_REG_BASE,
520 .end = DAVINCI_DA8XX_MCASP0_REG_BASE + (SZ_1K * 12) - 1, 520 .end = DAVINCI_DA8XX_MCASP0_REG_BASE + (SZ_1K * 12) - 1,
521 .flags = IORESOURCE_MEM, 521 .flags = IORESOURCE_MEM,
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index ef9ff1fb6f52..6117fc644188 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -641,6 +641,7 @@ static struct platform_device dm355_edma_device = {
641 641
642static struct resource dm355_asp1_resources[] = { 642static struct resource dm355_asp1_resources[] = {
643 { 643 {
644 .name = "mpu",
644 .start = DAVINCI_ASP1_BASE, 645 .start = DAVINCI_ASP1_BASE,
645 .end = DAVINCI_ASP1_BASE + SZ_8K - 1, 646 .end = DAVINCI_ASP1_BASE + SZ_8K - 1,
646 .flags = IORESOURCE_MEM, 647 .flags = IORESOURCE_MEM,
@@ -906,7 +907,7 @@ static struct davinci_gpio_platform_data dm355_gpio_platform_data = {
906int __init dm355_gpio_register(void) 907int __init dm355_gpio_register(void)
907{ 908{
908 return davinci_gpio_register(dm355_gpio_resources, 909 return davinci_gpio_register(dm355_gpio_resources,
909 sizeof(dm355_gpio_resources), 910 ARRAY_SIZE(dm355_gpio_resources),
910 &dm355_gpio_platform_data); 911 &dm355_gpio_platform_data);
911} 912}
912/*----------------------------------------------------------------------*/ 913/*----------------------------------------------------------------------*/
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 1511a0680f9a..d7c6f85d3fc9 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -720,7 +720,7 @@ static struct davinci_gpio_platform_data dm365_gpio_platform_data = {
720int __init dm365_gpio_register(void) 720int __init dm365_gpio_register(void)
721{ 721{
722 return davinci_gpio_register(dm365_gpio_resources, 722 return davinci_gpio_register(dm365_gpio_resources,
723 sizeof(dm365_gpio_resources), 723 ARRAY_SIZE(dm365_gpio_resources),
724 &dm365_gpio_platform_data); 724 &dm365_gpio_platform_data);
725} 725}
726 726
@@ -942,6 +942,7 @@ static struct platform_device dm365_edma_device = {
942 942
943static struct resource dm365_asp_resources[] = { 943static struct resource dm365_asp_resources[] = {
944 { 944 {
945 .name = "mpu",
945 .start = DAVINCI_DM365_ASP0_BASE, 946 .start = DAVINCI_DM365_ASP0_BASE,
946 .end = DAVINCI_DM365_ASP0_BASE + SZ_8K - 1, 947 .end = DAVINCI_DM365_ASP0_BASE + SZ_8K - 1,
947 .flags = IORESOURCE_MEM, 948 .flags = IORESOURCE_MEM,
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 143a3217e8ef..3ce47997bb46 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -572,6 +572,7 @@ static struct platform_device dm644x_edma_device = {
572/* DM6446 EVM uses ASP0; line-out is a pair of RCA jacks */ 572/* DM6446 EVM uses ASP0; line-out is a pair of RCA jacks */
573static struct resource dm644x_asp_resources[] = { 573static struct resource dm644x_asp_resources[] = {
574 { 574 {
575 .name = "mpu",
575 .start = DAVINCI_ASP0_BASE, 576 .start = DAVINCI_ASP0_BASE,
576 .end = DAVINCI_ASP0_BASE + SZ_8K - 1, 577 .end = DAVINCI_ASP0_BASE + SZ_8K - 1,
577 .flags = IORESOURCE_MEM, 578 .flags = IORESOURCE_MEM,
@@ -792,7 +793,7 @@ static struct davinci_gpio_platform_data dm644_gpio_platform_data = {
792int __init dm644x_gpio_register(void) 793int __init dm644x_gpio_register(void)
793{ 794{
794 return davinci_gpio_register(dm644_gpio_resources, 795 return davinci_gpio_register(dm644_gpio_resources,
795 sizeof(dm644_gpio_resources), 796 ARRAY_SIZE(dm644_gpio_resources),
796 &dm644_gpio_platform_data); 797 &dm644_gpio_platform_data);
797} 798}
798/*----------------------------------------------------------------------*/ 799/*----------------------------------------------------------------------*/
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 2a73f299c1d0..0e81fea65e7f 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -621,7 +621,7 @@ static struct platform_device dm646x_edma_device = {
621 621
622static struct resource dm646x_mcasp0_resources[] = { 622static struct resource dm646x_mcasp0_resources[] = {
623 { 623 {
624 .name = "mcasp0", 624 .name = "mpu",
625 .start = DAVINCI_DM646X_MCASP0_REG_BASE, 625 .start = DAVINCI_DM646X_MCASP0_REG_BASE,
626 .end = DAVINCI_DM646X_MCASP0_REG_BASE + (SZ_1K << 1) - 1, 626 .end = DAVINCI_DM646X_MCASP0_REG_BASE + (SZ_1K << 1) - 1,
627 .flags = IORESOURCE_MEM, 627 .flags = IORESOURCE_MEM,
@@ -641,7 +641,7 @@ static struct resource dm646x_mcasp0_resources[] = {
641 641
642static struct resource dm646x_mcasp1_resources[] = { 642static struct resource dm646x_mcasp1_resources[] = {
643 { 643 {
644 .name = "mcasp1", 644 .name = "mpu",
645 .start = DAVINCI_DM646X_MCASP1_REG_BASE, 645 .start = DAVINCI_DM646X_MCASP1_REG_BASE,
646 .end = DAVINCI_DM646X_MCASP1_REG_BASE + (SZ_1K << 1) - 1, 646 .end = DAVINCI_DM646X_MCASP1_REG_BASE + (SZ_1K << 1) - 1,
647 .flags = IORESOURCE_MEM, 647 .flags = IORESOURCE_MEM,
@@ -769,7 +769,7 @@ static struct davinci_gpio_platform_data dm646x_gpio_platform_data = {
769int __init dm646x_gpio_register(void) 769int __init dm646x_gpio_register(void)
770{ 770{
771 return davinci_gpio_register(dm646x_gpio_resources, 771 return davinci_gpio_register(dm646x_gpio_resources,
772 sizeof(dm646x_gpio_resources), 772 ARRAY_SIZE(dm646x_gpio_resources),
773 &dm646x_gpio_platform_data); 773 &dm646x_gpio_platform_data);
774} 774}
775/*----------------------------------------------------------------------*/ 775/*----------------------------------------------------------------------*/
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index 2739ca2c1334..e0091685fd48 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/spinlock.h> 17#include <linux/spinlock.h>
18#include <video/vga.h>
18 19
19#include <asm/pgtable.h> 20#include <asm/pgtable.h>
20#include <asm/page.h> 21#include <asm/page.h>
@@ -196,6 +197,8 @@ void __init footbridge_map_io(void)
196 iotable_init(ebsa285_host_io_desc, ARRAY_SIZE(ebsa285_host_io_desc)); 197 iotable_init(ebsa285_host_io_desc, ARRAY_SIZE(ebsa285_host_io_desc));
197 pci_map_io_early(__phys_to_pfn(DC21285_PCI_IO)); 198 pci_map_io_early(__phys_to_pfn(DC21285_PCI_IO));
198 } 199 }
200
201 vga_base = PCIMEM_BASE;
199} 202}
200 203
201void footbridge_restart(enum reboot_mode mode, const char *cmd) 204void footbridge_restart(enum reboot_mode mode, const char *cmd)
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index 3490a24f969e..7c2fdae9a38b 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -18,7 +18,6 @@
18#include <linux/irq.h> 18#include <linux/irq.h>
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/spinlock.h> 20#include <linux/spinlock.h>
21#include <video/vga.h>
22 21
23#include <asm/irq.h> 22#include <asm/irq.h>
24#include <asm/mach/pci.h> 23#include <asm/mach/pci.h>
@@ -291,7 +290,6 @@ void __init dc21285_preinit(void)
291 int cfn_mode; 290 int cfn_mode;
292 291
293 pcibios_min_mem = 0x81000000; 292 pcibios_min_mem = 0x81000000;
294 vga_base = PCIMEM_BASE;
295 293
296 mem_size = (unsigned int)high_memory - PAGE_OFFSET; 294 mem_size = (unsigned int)high_memory - PAGE_OFFSET;
297 for (mem_mask = 0x00100000; mem_mask < 0x10000000; mem_mask <<= 1) 295 for (mem_mask = 0x00100000; mem_mask < 0x10000000; mem_mask <<= 1)
diff --git a/arch/arm/mach-footbridge/ebsa285.c b/arch/arm/mach-footbridge/ebsa285.c
index b08243500e2e..1a7235fb52ac 100644
--- a/arch/arm/mach-footbridge/ebsa285.c
+++ b/arch/arm/mach-footbridge/ebsa285.c
@@ -30,21 +30,24 @@ static const struct {
30 const char *name; 30 const char *name;
31 const char *trigger; 31 const char *trigger;
32} ebsa285_leds[] = { 32} ebsa285_leds[] = {
33 { "ebsa285:amber", "heartbeat", }, 33 { "ebsa285:amber", "cpu0", },
34 { "ebsa285:green", "cpu0", }, 34 { "ebsa285:green", "heartbeat", },
35 { "ebsa285:red",}, 35 { "ebsa285:red",},
36}; 36};
37 37
38static unsigned char hw_led_state;
39
38static void ebsa285_led_set(struct led_classdev *cdev, 40static void ebsa285_led_set(struct led_classdev *cdev,
39 enum led_brightness b) 41 enum led_brightness b)
40{ 42{
41 struct ebsa285_led *led = container_of(cdev, 43 struct ebsa285_led *led = container_of(cdev,
42 struct ebsa285_led, cdev); 44 struct ebsa285_led, cdev);
43 45
44 if (b != LED_OFF) 46 if (b == LED_OFF)
45 *XBUS_LEDS |= led->mask; 47 hw_led_state |= led->mask;
46 else 48 else
47 *XBUS_LEDS &= ~led->mask; 49 hw_led_state &= ~led->mask;
50 *XBUS_LEDS = hw_led_state;
48} 51}
49 52
50static enum led_brightness ebsa285_led_get(struct led_classdev *cdev) 53static enum led_brightness ebsa285_led_get(struct led_classdev *cdev)
@@ -52,18 +55,19 @@ static enum led_brightness ebsa285_led_get(struct led_classdev *cdev)
52 struct ebsa285_led *led = container_of(cdev, 55 struct ebsa285_led *led = container_of(cdev,
53 struct ebsa285_led, cdev); 56 struct ebsa285_led, cdev);
54 57
55 return (*XBUS_LEDS & led->mask) ? LED_FULL : LED_OFF; 58 return hw_led_state & led->mask ? LED_OFF : LED_FULL;
56} 59}
57 60
58static int __init ebsa285_leds_init(void) 61static int __init ebsa285_leds_init(void)
59{ 62{
60 int i; 63 int i;
61 64
62 if (machine_is_ebsa285()) 65 if (!machine_is_ebsa285())
63 return -ENODEV; 66 return -ENODEV;
64 67
65 /* 3 LEDS All ON */ 68 /* 3 LEDS all off */
66 *XBUS_LEDS |= XBUS_LED_AMBER | XBUS_LED_GREEN | XBUS_LED_RED; 69 hw_led_state = XBUS_LED_AMBER | XBUS_LED_GREEN | XBUS_LED_RED;
70 *XBUS_LEDS = hw_led_state;
67 71
68 for (i = 0; i < ARRAY_SIZE(ebsa285_leds); i++) { 72 for (i = 0; i < ARRAY_SIZE(ebsa285_leds); i++) {
69 struct ebsa285_led *led; 73 struct ebsa285_led *led;
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index b3d7e5634b83..bd3bf66ce344 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -17,12 +17,15 @@
17#include <linux/clkdev.h> 17#include <linux/clkdev.h>
18#include <linux/clocksource.h> 18#include <linux/clocksource.h>
19#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
20#include <linux/input.h>
20#include <linux/io.h> 21#include <linux/io.h>
21#include <linux/irqchip.h> 22#include <linux/irqchip.h>
23#include <linux/mailbox.h>
22#include <linux/of.h> 24#include <linux/of.h>
23#include <linux/of_irq.h> 25#include <linux/of_irq.h>
24#include <linux/of_platform.h> 26#include <linux/of_platform.h>
25#include <linux/of_address.h> 27#include <linux/of_address.h>
28#include <linux/reboot.h>
26#include <linux/amba/bus.h> 29#include <linux/amba/bus.h>
27#include <linux/platform_device.h> 30#include <linux/platform_device.h>
28 31
@@ -130,6 +133,24 @@ static struct platform_device highbank_cpuidle_device = {
130 .name = "cpuidle-calxeda", 133 .name = "cpuidle-calxeda",
131}; 134};
132 135
136static int hb_keys_notifier(struct notifier_block *nb, unsigned long event, void *data)
137{
138 u32 key = *(u32 *)data;
139
140 if (event != 0x1000)
141 return 0;
142
143 if (key == KEY_POWER)
144 orderly_poweroff(false);
145 else if (key == 0xffff)
146 ctrl_alt_del();
147
148 return 0;
149}
150static struct notifier_block hb_keys_nb = {
151 .notifier_call = hb_keys_notifier,
152};
153
133static void __init highbank_init(void) 154static void __init highbank_init(void)
134{ 155{
135 struct device_node *np; 156 struct device_node *np;
@@ -145,6 +166,8 @@ static void __init highbank_init(void)
145 bus_register_notifier(&platform_bus_type, &highbank_platform_nb); 166 bus_register_notifier(&platform_bus_type, &highbank_platform_nb);
146 bus_register_notifier(&amba_bustype, &highbank_amba_nb); 167 bus_register_notifier(&amba_bustype, &highbank_amba_nb);
147 168
169 pl320_ipc_register_notifier(&hb_keys_nb);
170
148 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 171 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
149 172
150 if (psci_ops.cpu_suspend) 173 if (psci_ops.cpu_suspend)
diff --git a/arch/arm/mach-iop13xx/include/mach/adma.h b/arch/arm/mach-iop13xx/include/mach/adma.h
index 6d3782d85a9f..a86fd0ed7757 100644
--- a/arch/arm/mach-iop13xx/include/mach/adma.h
+++ b/arch/arm/mach-iop13xx/include/mach/adma.h
@@ -218,20 +218,6 @@ iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op)
218#define iop_chan_pq_slot_count iop_chan_xor_slot_count 218#define iop_chan_pq_slot_count iop_chan_xor_slot_count
219#define iop_chan_pq_zero_sum_slot_count iop_chan_xor_slot_count 219#define iop_chan_pq_zero_sum_slot_count iop_chan_xor_slot_count
220 220
221static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
222 struct iop_adma_chan *chan)
223{
224 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
225 return hw_desc->dest_addr;
226}
227
228static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
229 struct iop_adma_chan *chan)
230{
231 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
232 return hw_desc->q_dest_addr;
233}
234
235static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, 221static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
236 struct iop_adma_chan *chan) 222 struct iop_adma_chan *chan)
237{ 223{
@@ -350,18 +336,6 @@ iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
350 hw_desc->desc_ctrl = u_desc_ctrl.value; 336 hw_desc->desc_ctrl = u_desc_ctrl.value;
351} 337}
352 338
353static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
354{
355 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
356 union {
357 u32 value;
358 struct iop13xx_adma_desc_ctrl field;
359 } u_desc_ctrl;
360
361 u_desc_ctrl.value = hw_desc->desc_ctrl;
362 return u_desc_ctrl.field.pq_xfer_en;
363}
364
365static inline void 339static inline void
366iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, 340iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
367 unsigned long flags) 341 unsigned long flags)
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 1f25f3e99c05..adcef406ff0a 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -19,11 +19,11 @@ secure-common = omap-smc.o omap-secure.o
19 19
20obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) 20obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common)
21obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common) 21obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
22obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common) $(secure-common) 22obj-$(CONFIG_ARCH_OMAP4) += $(hwmod-common) $(secure-common)
23obj-$(CONFIG_SOC_AM33XX) += irq.o $(hwmod-common) 23obj-$(CONFIG_SOC_AM33XX) += irq.o $(hwmod-common)
24obj-$(CONFIG_SOC_OMAP5) += prm44xx.o $(hwmod-common) $(secure-common) 24obj-$(CONFIG_SOC_OMAP5) += $(hwmod-common) $(secure-common)
25obj-$(CONFIG_SOC_AM43XX) += $(hwmod-common) $(secure-common) 25obj-$(CONFIG_SOC_AM43XX) += $(hwmod-common) $(secure-common)
26obj-$(CONFIG_SOC_DRA7XX) += prm44xx.o $(hwmod-common) $(secure-common) 26obj-$(CONFIG_SOC_DRA7XX) += $(hwmod-common) $(secure-common)
27 27
28ifneq ($(CONFIG_SND_OMAP_SOC_MCBSP),) 28ifneq ($(CONFIG_SND_OMAP_SOC_MCBSP),)
29obj-y += mcbsp.o 29obj-y += mcbsp.o
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 19f1652e94cf..8d972ff18c56 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -131,6 +131,24 @@ DT_MACHINE_START(OMAP3_GP_DT, "Generic OMAP3-GP (Flattened Device Tree)")
131 .dt_compat = omap3_gp_boards_compat, 131 .dt_compat = omap3_gp_boards_compat,
132 .restart = omap3xxx_restart, 132 .restart = omap3xxx_restart,
133MACHINE_END 133MACHINE_END
134
135static const char *am3517_boards_compat[] __initdata = {
136 "ti,am3517",
137 NULL,
138};
139
140DT_MACHINE_START(AM3517_DT, "Generic AM3517 (Flattened Device Tree)")
141 .reserve = omap_reserve,
142 .map_io = omap3_map_io,
143 .init_early = am35xx_init_early,
144 .init_irq = omap_intc_of_init,
145 .handle_irq = omap3_intc_handle_irq,
146 .init_machine = omap_generic_init,
147 .init_late = omap3_init_late,
148 .init_time = omap3_gptimer_timer_init,
149 .dt_compat = am3517_boards_compat,
150 .restart = omap3xxx_restart,
151MACHINE_END
134#endif 152#endif
135 153
136#ifdef CONFIG_SOC_AM33XX 154#ifdef CONFIG_SOC_AM33XX
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index f7644febee81..e30ef6797c63 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -299,7 +299,6 @@ struct omap_sdrc_params;
299extern void omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0, 299extern void omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
300 struct omap_sdrc_params *sdrc_cs1); 300 struct omap_sdrc_params *sdrc_cs1);
301struct omap2_hsmmc_info; 301struct omap2_hsmmc_info;
302extern int omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers);
303extern void omap_reserve(void); 302extern void omap_reserve(void);
304 303
305struct omap_hwmod; 304struct omap_hwmod;
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index a4e536b11ec9..58347bb874a0 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -32,7 +32,6 @@
32 32
33#include "soc.h" 33#include "soc.h"
34#include "iomap.h" 34#include "iomap.h"
35#include "mux.h"
36#include "control.h" 35#include "control.h"
37#include "display.h" 36#include "display.h"
38#include "prm.h" 37#include "prm.h"
@@ -102,90 +101,13 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = {
102 { "dss_hdmi", "omapdss_hdmi", -1 }, 101 { "dss_hdmi", "omapdss_hdmi", -1 },
103}; 102};
104 103
105static void __init omap4_tpd12s015_mux_pads(void)
106{
107 omap_mux_init_signal("hdmi_cec",
108 OMAP_PIN_INPUT_PULLUP);
109 omap_mux_init_signal("hdmi_ddc_scl",
110 OMAP_PIN_INPUT_PULLUP);
111 omap_mux_init_signal("hdmi_ddc_sda",
112 OMAP_PIN_INPUT_PULLUP);
113}
114
115static void __init omap4_hdmi_mux_pads(enum omap_hdmi_flags flags)
116{
117 u32 reg;
118 u16 control_i2c_1;
119
120 /*
121 * CONTROL_I2C_1: HDMI_DDC_SDA_PULLUPRESX (bit 28) and
122 * HDMI_DDC_SCL_PULLUPRESX (bit 24) are set to disable
123 * internal pull up resistor.
124 */
125 if (flags & OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP) {
126 control_i2c_1 = OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_I2C_1;
127 reg = omap4_ctrl_pad_readl(control_i2c_1);
128 reg |= (OMAP4_HDMI_DDC_SDA_PULLUPRESX_MASK |
129 OMAP4_HDMI_DDC_SCL_PULLUPRESX_MASK);
130 omap4_ctrl_pad_writel(reg, control_i2c_1);
131 }
132}
133
134static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
135{
136 u32 enable_mask, enable_shift;
137 u32 pipd_mask, pipd_shift;
138 u32 reg;
139
140 if (dsi_id == 0) {
141 enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
142 enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT;
143 pipd_mask = OMAP4_DSI1_PIPD_MASK;
144 pipd_shift = OMAP4_DSI1_PIPD_SHIFT;
145 } else if (dsi_id == 1) {
146 enable_mask = OMAP4_DSI2_LANEENABLE_MASK;
147 enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT;
148 pipd_mask = OMAP4_DSI2_PIPD_MASK;
149 pipd_shift = OMAP4_DSI2_PIPD_SHIFT;
150 } else {
151 return -ENODEV;
152 }
153
154 reg = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
155
156 reg &= ~enable_mask;
157 reg &= ~pipd_mask;
158
159 reg |= (lanes << enable_shift) & enable_mask;
160 reg |= (lanes << pipd_shift) & pipd_mask;
161
162 omap4_ctrl_pad_writel(reg, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
163
164 return 0;
165}
166
167int __init omap_hdmi_init(enum omap_hdmi_flags flags)
168{
169 if (cpu_is_omap44xx()) {
170 omap4_hdmi_mux_pads(flags);
171 omap4_tpd12s015_mux_pads();
172 }
173
174 return 0;
175}
176
177static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask) 104static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask)
178{ 105{
179 if (cpu_is_omap44xx())
180 return omap4_dsi_mux_pads(dsi_id, lane_mask);
181
182 return 0; 106 return 0;
183} 107}
184 108
185static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask) 109static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask)
186{ 110{
187 if (cpu_is_omap44xx())
188 omap4_dsi_mux_pads(dsi_id, 0);
189} 111}
190 112
191static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput) 113static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput)
diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c
index 365bfd3d9c68..dadccc91488c 100644
--- a/arch/arm/mach-omap2/dss-common.c
+++ b/arch/arm/mach-omap2/dss-common.c
@@ -223,7 +223,7 @@ void __init omap_4430sdp_display_init_of(void)
223static struct connector_dvi_platform_data omap3_igep2_dvi_connector_pdata = { 223static struct connector_dvi_platform_data omap3_igep2_dvi_connector_pdata = {
224 .name = "dvi", 224 .name = "dvi",
225 .source = "tfp410.0", 225 .source = "tfp410.0",
226 .i2c_bus_num = 3, 226 .i2c_bus_num = 2,
227}; 227};
228 228
229static struct platform_device omap3_igep2_dvi_connector_device = { 229static struct platform_device omap3_igep2_dvi_connector_device = {
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 81de56251955..d24926e6340f 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1502,6 +1502,22 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
1502 } 1502 }
1503 1503
1504 /* 1504 /*
1505 * For some GPMC devices we still need to rely on the bootloader
1506 * timings because the devices can be connected via FPGA. So far
1507 * the list is smc91x on the omap2 SDP boards, and 8250 on zooms.
1508 * REVISIT: Add timing support from slls644g.pdf and from the
1509 * lan91c96 manual.
1510 */
1511 if (of_device_is_compatible(child, "ns16550a") ||
1512 of_device_is_compatible(child, "smsc,lan91c94") ||
1513 of_device_is_compatible(child, "smsc,lan91c111")) {
1514 dev_warn(&pdev->dev,
1515 "%s using bootloader timings on CS%d\n",
1516 child->name, cs);
1517 goto no_timings;
1518 }
1519
1520 /*
1505 * FIXME: gpmc_cs_request() will map the CS to an arbitary 1521 * FIXME: gpmc_cs_request() will map the CS to an arbitary
1506 * location in the gpmc address space. When booting with 1522 * location in the gpmc address space. When booting with
1507 * device-tree we want the NOR flash to be mapped to the 1523 * device-tree we want the NOR flash to be mapped to the
@@ -1529,6 +1545,7 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
1529 gpmc_read_timings_dt(child, &gpmc_t); 1545 gpmc_read_timings_dt(child, &gpmc_t);
1530 gpmc_cs_set_timings(cs, &gpmc_t); 1546 gpmc_cs_set_timings(cs, &gpmc_t);
1531 1547
1548no_timings:
1532 if (of_platform_device_create(child, NULL, &pdev->dev)) 1549 if (of_platform_device_create(child, NULL, &pdev->dev))
1533 return 0; 1550 return 0;
1534 1551
@@ -1541,42 +1558,6 @@ err:
1541 return ret; 1558 return ret;
1542} 1559}
1543 1560
1544/*
1545 * REVISIT: Add timing support from slls644g.pdf
1546 */
1547static int gpmc_probe_8250(struct platform_device *pdev,
1548 struct device_node *child)
1549{
1550 struct resource res;
1551 unsigned long base;
1552 int ret, cs;
1553
1554 if (of_property_read_u32(child, "reg", &cs) < 0) {
1555 dev_err(&pdev->dev, "%s has no 'reg' property\n",
1556 child->full_name);
1557 return -ENODEV;
1558 }
1559
1560 if (of_address_to_resource(child, 0, &res) < 0) {
1561 dev_err(&pdev->dev, "%s has malformed 'reg' property\n",
1562 child->full_name);
1563 return -ENODEV;
1564 }
1565
1566 ret = gpmc_cs_request(cs, resource_size(&res), &base);
1567 if (ret < 0) {
1568 dev_err(&pdev->dev, "cannot request GPMC CS %d\n", cs);
1569 return ret;
1570 }
1571
1572 if (of_platform_device_create(child, NULL, &pdev->dev))
1573 return 0;
1574
1575 dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name);
1576
1577 return -ENODEV;
1578}
1579
1580static int gpmc_probe_dt(struct platform_device *pdev) 1561static int gpmc_probe_dt(struct platform_device *pdev)
1581{ 1562{
1582 int ret; 1563 int ret;
@@ -1618,10 +1599,9 @@ static int gpmc_probe_dt(struct platform_device *pdev)
1618 else if (of_node_cmp(child->name, "onenand") == 0) 1599 else if (of_node_cmp(child->name, "onenand") == 0)
1619 ret = gpmc_probe_onenand_child(pdev, child); 1600 ret = gpmc_probe_onenand_child(pdev, child);
1620 else if (of_node_cmp(child->name, "ethernet") == 0 || 1601 else if (of_node_cmp(child->name, "ethernet") == 0 ||
1621 of_node_cmp(child->name, "nor") == 0) 1602 of_node_cmp(child->name, "nor") == 0 ||
1603 of_node_cmp(child->name, "uart") == 0)
1622 ret = gpmc_probe_generic_child(pdev, child); 1604 ret = gpmc_probe_generic_child(pdev, child);
1623 else if (of_node_cmp(child->name, "8250") == 0)
1624 ret = gpmc_probe_8250(pdev, child);
1625 1605
1626 if (WARN(ret < 0, "%s: probing gpmc child %s failed\n", 1606 if (WARN(ret < 0, "%s: probing gpmc child %s failed\n",
1627 __func__, child->full_name)) 1607 __func__, child->full_name))
diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h
index 8cc7d331437d..3e97c6c8ecf1 100644
--- a/arch/arm/mach-omap2/omap-secure.h
+++ b/arch/arm/mach-omap2/omap-secure.h
@@ -76,6 +76,13 @@ static inline void omap_barrier_reserve_memblock(void)
76{ } 76{ }
77#endif 77#endif
78 78
79#ifdef CONFIG_SOC_HAS_REALTIME_COUNTER
79void set_cntfreq(void); 80void set_cntfreq(void);
81#else
82static inline void set_cntfreq(void)
83{
84}
85#endif
86
80#endif /* __ASSEMBLER__ */ 87#endif /* __ASSEMBLER__ */
81#endif /* OMAP_ARCH_OMAP_SECURE_H */ 88#endif /* OMAP_ARCH_OMAP_SECURE_H */
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 57911430324e..b39efd46abf9 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -35,7 +35,6 @@
35#include "iomap.h" 35#include "iomap.h"
36#include "common.h" 36#include "common.h"
37#include "mmc.h" 37#include "mmc.h"
38#include "hsmmc.h"
39#include "prminst44xx.h" 38#include "prminst44xx.h"
40#include "prcm_mpu44xx.h" 39#include "prcm_mpu44xx.h"
41#include "omap4-sar-layout.h" 40#include "omap4-sar-layout.h"
@@ -284,59 +283,3 @@ skip_errata_init:
284 omap_wakeupgen_init(); 283 omap_wakeupgen_init();
285 irqchip_init(); 284 irqchip_init();
286} 285}
287
288#if defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
289static int omap4_twl6030_hsmmc_late_init(struct device *dev)
290{
291 int irq = 0;
292 struct platform_device *pdev = container_of(dev,
293 struct platform_device, dev);
294 struct omap_mmc_platform_data *pdata = dev->platform_data;
295
296 /* Setting MMC1 Card detect Irq */
297 if (pdev->id == 0) {
298 irq = twl6030_mmc_card_detect_config();
299 if (irq < 0) {
300 dev_err(dev, "%s: Error card detect config(%d)\n",
301 __func__, irq);
302 return irq;
303 }
304 pdata->slots[0].card_detect_irq = irq;
305 pdata->slots[0].card_detect = twl6030_mmc_card_detect;
306 }
307 return 0;
308}
309
310static __init void omap4_twl6030_hsmmc_set_late_init(struct device *dev)
311{
312 struct omap_mmc_platform_data *pdata;
313
314 /* dev can be null if CONFIG_MMC_OMAP_HS is not set */
315 if (!dev) {
316 pr_err("Failed %s\n", __func__);
317 return;
318 }
319 pdata = dev->platform_data;
320 pdata->init = omap4_twl6030_hsmmc_late_init;
321}
322
323int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers)
324{
325 struct omap2_hsmmc_info *c;
326
327 omap_hsmmc_init(controllers);
328 for (c = controllers; c->mmc; c++) {
329 /* pdev can be null if CONFIG_MMC_OMAP_HS is not set */
330 if (!c->pdev)
331 continue;
332 omap4_twl6030_hsmmc_set_late_init(&c->pdev->dev);
333 }
334
335 return 0;
336}
337#else
338int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers)
339{
340 return 0;
341}
342#endif
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 53f0735817bb..e0a398cf28d8 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -183,6 +183,10 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
183odbfd_exit1: 183odbfd_exit1:
184 kfree(hwmods); 184 kfree(hwmods);
185odbfd_exit: 185odbfd_exit:
186 /* if data/we are at fault.. load up a fail handler */
187 if (ret)
188 pdev->dev.pm_domain = &omap_device_fail_pm_domain;
189
186 return ret; 190 return ret;
187} 191}
188 192
@@ -604,6 +608,19 @@ static int _od_runtime_resume(struct device *dev)
604 608
605 return pm_generic_runtime_resume(dev); 609 return pm_generic_runtime_resume(dev);
606} 610}
611
612static int _od_fail_runtime_suspend(struct device *dev)
613{
614 dev_warn(dev, "%s: FIXME: missing hwmod/omap_dev info\n", __func__);
615 return -ENODEV;
616}
617
618static int _od_fail_runtime_resume(struct device *dev)
619{
620 dev_warn(dev, "%s: FIXME: missing hwmod/omap_dev info\n", __func__);
621 return -ENODEV;
622}
623
607#endif 624#endif
608 625
609#ifdef CONFIG_SUSPEND 626#ifdef CONFIG_SUSPEND
@@ -657,6 +674,13 @@ static int _od_resume_noirq(struct device *dev)
657#define _od_resume_noirq NULL 674#define _od_resume_noirq NULL
658#endif 675#endif
659 676
677struct dev_pm_domain omap_device_fail_pm_domain = {
678 .ops = {
679 SET_RUNTIME_PM_OPS(_od_fail_runtime_suspend,
680 _od_fail_runtime_resume, NULL)
681 }
682};
683
660struct dev_pm_domain omap_device_pm_domain = { 684struct dev_pm_domain omap_device_pm_domain = {
661 .ops = { 685 .ops = {
662 SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume, 686 SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume,
diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
index 17ca1aec2710..78c02b355179 100644
--- a/arch/arm/mach-omap2/omap_device.h
+++ b/arch/arm/mach-omap2/omap_device.h
@@ -29,6 +29,7 @@
29#include "omap_hwmod.h" 29#include "omap_hwmod.h"
30 30
31extern struct dev_pm_domain omap_device_pm_domain; 31extern struct dev_pm_domain omap_device_pm_domain;
32extern struct dev_pm_domain omap_device_fail_pm_domain;
32 33
33/* omap_device._state values */ 34/* omap_device._state values */
34#define OMAP_DEVICE_STATE_UNKNOWN 0 35#define OMAP_DEVICE_STATE_UNKNOWN 0
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index e3f0ecaf87dd..8a1b5e0bad40 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -399,7 +399,7 @@ static int _set_clockactivity(struct omap_hwmod *oh, u8 clockact, u32 *v)
399} 399}
400 400
401/** 401/**
402 * _set_softreset: set OCP_SYSCONFIG.CLOCKACTIVITY bits in @v 402 * _set_softreset: set OCP_SYSCONFIG.SOFTRESET bit in @v
403 * @oh: struct omap_hwmod * 403 * @oh: struct omap_hwmod *
404 * @v: pointer to register contents to modify 404 * @v: pointer to register contents to modify
405 * 405 *
@@ -427,6 +427,36 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v)
427} 427}
428 428
429/** 429/**
430 * _clear_softreset: clear OCP_SYSCONFIG.SOFTRESET bit in @v
431 * @oh: struct omap_hwmod *
432 * @v: pointer to register contents to modify
433 *
434 * Clear the SOFTRESET bit in @v for hwmod @oh. Returns -EINVAL upon
435 * error or 0 upon success.
436 */
437static int _clear_softreset(struct omap_hwmod *oh, u32 *v)
438{
439 u32 softrst_mask;
440
441 if (!oh->class->sysc ||
442 !(oh->class->sysc->sysc_flags & SYSC_HAS_SOFTRESET))
443 return -EINVAL;
444
445 if (!oh->class->sysc->sysc_fields) {
446 WARN(1,
447 "omap_hwmod: %s: sysc_fields absent for sysconfig class\n",
448 oh->name);
449 return -EINVAL;
450 }
451
452 softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift);
453
454 *v &= ~softrst_mask;
455
456 return 0;
457}
458
459/**
430 * _wait_softreset_complete - wait for an OCP softreset to complete 460 * _wait_softreset_complete - wait for an OCP softreset to complete
431 * @oh: struct omap_hwmod * to wait on 461 * @oh: struct omap_hwmod * to wait on
432 * 462 *
@@ -785,6 +815,7 @@ static int _init_interface_clks(struct omap_hwmod *oh)
785 pr_warning("omap_hwmod: %s: cannot clk_get interface_clk %s\n", 815 pr_warning("omap_hwmod: %s: cannot clk_get interface_clk %s\n",
786 oh->name, os->clk); 816 oh->name, os->clk);
787 ret = -EINVAL; 817 ret = -EINVAL;
818 continue;
788 } 819 }
789 os->_clk = c; 820 os->_clk = c;
790 /* 821 /*
@@ -821,6 +852,7 @@ static int _init_opt_clks(struct omap_hwmod *oh)
821 pr_warning("omap_hwmod: %s: cannot clk_get opt_clk %s\n", 852 pr_warning("omap_hwmod: %s: cannot clk_get opt_clk %s\n",
822 oh->name, oc->clk); 853 oh->name, oc->clk);
823 ret = -EINVAL; 854 ret = -EINVAL;
855 continue;
824 } 856 }
825 oc->_clk = c; 857 oc->_clk = c;
826 /* 858 /*
@@ -1911,6 +1943,12 @@ static int _ocp_softreset(struct omap_hwmod *oh)
1911 ret = _set_softreset(oh, &v); 1943 ret = _set_softreset(oh, &v);
1912 if (ret) 1944 if (ret)
1913 goto dis_opt_clks; 1945 goto dis_opt_clks;
1946
1947 _write_sysconfig(v, oh);
1948 ret = _clear_softreset(oh, &v);
1949 if (ret)
1950 goto dis_opt_clks;
1951
1914 _write_sysconfig(v, oh); 1952 _write_sysconfig(v, oh);
1915 1953
1916 if (oh->class->sysc->srst_udelay) 1954 if (oh->class->sysc->srst_udelay)
@@ -2326,38 +2364,80 @@ static int _shutdown(struct omap_hwmod *oh)
2326 return 0; 2364 return 0;
2327} 2365}
2328 2366
2367static int of_dev_find_hwmod(struct device_node *np,
2368 struct omap_hwmod *oh)
2369{
2370 int count, i, res;
2371 const char *p;
2372
2373 count = of_property_count_strings(np, "ti,hwmods");
2374 if (count < 1)
2375 return -ENODEV;
2376
2377 for (i = 0; i < count; i++) {
2378 res = of_property_read_string_index(np, "ti,hwmods",
2379 i, &p);
2380 if (res)
2381 continue;
2382 if (!strcmp(p, oh->name)) {
2383 pr_debug("omap_hwmod: dt %s[%i] uses hwmod %s\n",
2384 np->name, i, oh->name);
2385 return i;
2386 }
2387 }
2388
2389 return -ENODEV;
2390}
2391
2329/** 2392/**
2330 * of_dev_hwmod_lookup - look up needed hwmod from dt blob 2393 * of_dev_hwmod_lookup - look up needed hwmod from dt blob
2331 * @np: struct device_node * 2394 * @np: struct device_node *
2332 * @oh: struct omap_hwmod * 2395 * @oh: struct omap_hwmod *
2396 * @index: index of the entry found
2397 * @found: struct device_node * found or NULL
2333 * 2398 *
2334 * Parse the dt blob and find out needed hwmod. Recursive function is 2399 * Parse the dt blob and find out needed hwmod. Recursive function is
2335 * implemented to take care hierarchical dt blob parsing. 2400 * implemented to take care hierarchical dt blob parsing.
2336 * Return: The device node on success or NULL on failure. 2401 * Return: Returns 0 on success, -ENODEV when not found.
2337 */ 2402 */
2338static struct device_node *of_dev_hwmod_lookup(struct device_node *np, 2403static int of_dev_hwmod_lookup(struct device_node *np,
2339 struct omap_hwmod *oh) 2404 struct omap_hwmod *oh,
2405 int *index,
2406 struct device_node **found)
2340{ 2407{
2341 struct device_node *np0 = NULL, *np1 = NULL; 2408 struct device_node *np0 = NULL;
2342 const char *p; 2409 int res;
2410
2411 res = of_dev_find_hwmod(np, oh);
2412 if (res >= 0) {
2413 *found = np;
2414 *index = res;
2415 return 0;
2416 }
2343 2417
2344 for_each_child_of_node(np, np0) { 2418 for_each_child_of_node(np, np0) {
2345 if (of_find_property(np0, "ti,hwmods", NULL)) { 2419 struct device_node *fc;
2346 p = of_get_property(np0, "ti,hwmods", NULL); 2420 int i;
2347 if (!strcmp(p, oh->name)) 2421
2348 return np0; 2422 res = of_dev_hwmod_lookup(np0, oh, &i, &fc);
2349 np1 = of_dev_hwmod_lookup(np0, oh); 2423 if (res == 0) {
2350 if (np1) 2424 *found = fc;
2351 return np1; 2425 *index = i;
2426 return 0;
2352 } 2427 }
2353 } 2428 }
2354 return NULL; 2429
2430 *found = NULL;
2431 *index = 0;
2432
2433 return -ENODEV;
2355} 2434}
2356 2435
2357/** 2436/**
2358 * _init_mpu_rt_base - populate the virtual address for a hwmod 2437 * _init_mpu_rt_base - populate the virtual address for a hwmod
2359 * @oh: struct omap_hwmod * to locate the virtual address 2438 * @oh: struct omap_hwmod * to locate the virtual address
2360 * @data: (unused, caller should pass NULL) 2439 * @data: (unused, caller should pass NULL)
2440 * @index: index of the reg entry iospace in device tree
2361 * @np: struct device_node * of the IP block's device node in the DT data 2441 * @np: struct device_node * of the IP block's device node in the DT data
2362 * 2442 *
2363 * Cache the virtual address used by the MPU to access this IP block's 2443 * Cache the virtual address used by the MPU to access this IP block's
@@ -2368,7 +2448,7 @@ static struct device_node *of_dev_hwmod_lookup(struct device_node *np,
2368 * -ENXIO on absent or invalid register target address space. 2448 * -ENXIO on absent or invalid register target address space.
2369 */ 2449 */
2370static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, 2450static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
2371 struct device_node *np) 2451 int index, struct device_node *np)
2372{ 2452{
2373 struct omap_hwmod_addr_space *mem; 2453 struct omap_hwmod_addr_space *mem;
2374 void __iomem *va_start = NULL; 2454 void __iomem *va_start = NULL;
@@ -2390,13 +2470,17 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
2390 if (!np) 2470 if (!np)
2391 return -ENXIO; 2471 return -ENXIO;
2392 2472
2393 va_start = of_iomap(np, oh->mpu_rt_idx); 2473 va_start = of_iomap(np, index + oh->mpu_rt_idx);
2394 } else { 2474 } else {
2395 va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start); 2475 va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start);
2396 } 2476 }
2397 2477
2398 if (!va_start) { 2478 if (!va_start) {
2399 pr_err("omap_hwmod: %s: Could not ioremap\n", oh->name); 2479 if (mem)
2480 pr_err("omap_hwmod: %s: Could not ioremap\n", oh->name);
2481 else
2482 pr_err("omap_hwmod: %s: Missing dt reg%i for %s\n",
2483 oh->name, index, np->full_name);
2400 return -ENXIO; 2484 return -ENXIO;
2401 } 2485 }
2402 2486
@@ -2422,17 +2506,29 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
2422 */ 2506 */
2423static int __init _init(struct omap_hwmod *oh, void *data) 2507static int __init _init(struct omap_hwmod *oh, void *data)
2424{ 2508{
2425 int r; 2509 int r, index;
2426 struct device_node *np = NULL; 2510 struct device_node *np = NULL;
2427 2511
2428 if (oh->_state != _HWMOD_STATE_REGISTERED) 2512 if (oh->_state != _HWMOD_STATE_REGISTERED)
2429 return 0; 2513 return 0;
2430 2514
2431 if (of_have_populated_dt()) 2515 if (of_have_populated_dt()) {
2432 np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh); 2516 struct device_node *bus;
2517
2518 bus = of_find_node_by_name(NULL, "ocp");
2519 if (!bus)
2520 return -ENODEV;
2521
2522 r = of_dev_hwmod_lookup(bus, oh, &index, &np);
2523 if (r)
2524 pr_debug("omap_hwmod: %s missing dt data\n", oh->name);
2525 else if (np && index)
2526 pr_warn("omap_hwmod: %s using broken dt data from %s\n",
2527 oh->name, np->name);
2528 }
2433 2529
2434 if (oh->class->sysc) { 2530 if (oh->class->sysc) {
2435 r = _init_mpu_rt_base(oh, NULL, np); 2531 r = _init_mpu_rt_base(oh, NULL, index, np);
2436 if (r < 0) { 2532 if (r < 0) {
2437 WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", 2533 WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
2438 oh->name); 2534 oh->name);
@@ -3169,6 +3265,11 @@ int omap_hwmod_softreset(struct omap_hwmod *oh)
3169 goto error; 3265 goto error;
3170 _write_sysconfig(v, oh); 3266 _write_sysconfig(v, oh);
3171 3267
3268 ret = _clear_softreset(oh, &v);
3269 if (ret)
3270 goto error;
3271 _write_sysconfig(v, oh);
3272
3172error: 3273error:
3173 return ret; 3274 return ret;
3174} 3275}
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 9e56fabd7fa3..d33742908f97 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1943,7 +1943,8 @@ static struct omap_hwmod_class_sysconfig omap3xxx_usb_host_hs_sysc = {
1943 .syss_offs = 0x0014, 1943 .syss_offs = 0x0014,
1944 .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY | 1944 .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
1945 SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | 1945 SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP |
1946 SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), 1946 SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
1947 SYSS_HAS_RESET_STATUS),
1947 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | 1948 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
1948 MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), 1949 MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
1949 .sysc_fields = &omap_hwmod_sysc_type1, 1950 .sysc_fields = &omap_hwmod_sysc_type1,
@@ -2021,15 +2022,7 @@ static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = {
2021 * hence HWMOD_SWSUP_MSTANDBY 2022 * hence HWMOD_SWSUP_MSTANDBY
2022 */ 2023 */
2023 2024
2024 /* 2025 .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
2025 * During system boot; If the hwmod framework resets the module
2026 * the module will have smart idle settings; which can lead to deadlock
2027 * (above Errata Id:i660); so, dont reset the module during boot;
2028 * Use HWMOD_INIT_NO_RESET.
2029 */
2030
2031 .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
2032 HWMOD_INIT_NO_RESET,
2033}; 2026};
2034 2027
2035/* 2028/*
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 1e5b12cb8246..3318cae96e7d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2937,7 +2937,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_usb_host_hs_sysc = {
2937 .sysc_offs = 0x0010, 2937 .sysc_offs = 0x0010,
2938 .syss_offs = 0x0014, 2938 .syss_offs = 0x0014,
2939 .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE | 2939 .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE |
2940 SYSC_HAS_SOFTRESET), 2940 SYSC_HAS_SOFTRESET | SYSC_HAS_RESET_STATUS),
2941 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | 2941 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
2942 SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | 2942 SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
2943 MSTANDBY_SMART | MSTANDBY_SMART_WKUP), 2943 MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
@@ -3001,15 +3001,7 @@ static struct omap_hwmod omap44xx_usb_host_hs_hwmod = {
3001 * hence HWMOD_SWSUP_MSTANDBY 3001 * hence HWMOD_SWSUP_MSTANDBY
3002 */ 3002 */
3003 3003
3004 /* 3004 .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
3005 * During system boot; If the hwmod framework resets the module
3006 * the module will have smart idle settings; which can lead to deadlock
3007 * (above Errata Id:i660); so, dont reset the module during boot;
3008 * Use HWMOD_INIT_NO_RESET.
3009 */
3010
3011 .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
3012 HWMOD_INIT_NO_RESET,
3013}; 3005};
3014 3006
3015/* 3007/*
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index 9e08d6994a0b..e297d6231c3a 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -1544,7 +1544,8 @@ static struct omap_hwmod_class_sysconfig omap54xx_usb_host_hs_sysc = {
1544 .rev_offs = 0x0000, 1544 .rev_offs = 0x0000,
1545 .sysc_offs = 0x0010, 1545 .sysc_offs = 0x0010,
1546 .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS | 1546 .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS |
1547 SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), 1547 SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
1548 SYSC_HAS_RESET_STATUS),
1548 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | 1549 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
1549 SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | 1550 SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
1550 MSTANDBY_SMART | MSTANDBY_SMART_WKUP), 1551 MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
@@ -1598,15 +1599,7 @@ static struct omap_hwmod omap54xx_usb_host_hs_hwmod = {
1598 * hence HWMOD_SWSUP_MSTANDBY 1599 * hence HWMOD_SWSUP_MSTANDBY
1599 */ 1600 */
1600 1601
1601 /* 1602 .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
1602 * During system boot; If the hwmod framework resets the module
1603 * the module will have smart idle settings; which can lead to deadlock
1604 * (above Errata Id:i660); so, dont reset the module during boot;
1605 * Use HWMOD_INIT_NO_RESET.
1606 */
1607
1608 .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
1609 HWMOD_INIT_NO_RESET,
1610 .main_clk = "l3init_60m_fclk", 1603 .main_clk = "l3init_60m_fclk",
1611 .prcm = { 1604 .prcm = {
1612 .omap4 = { 1605 .omap4 = {
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 10c71450cf63..39f020c982e8 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -139,6 +139,7 @@ struct of_dev_auxdata omap_auxdata_lookup[] __initdata = {
139 139
140static struct pdata_init pdata_quirks[] __initdata = { 140static struct pdata_init pdata_quirks[] __initdata = {
141#ifdef CONFIG_ARCH_OMAP3 141#ifdef CONFIG_ARCH_OMAP3
142 { "nokia,omap3-n900", hsmmc2_internal_input_clk, },
142 { "nokia,omap3-n9", hsmmc2_internal_input_clk, }, 143 { "nokia,omap3-n9", hsmmc2_internal_input_clk, },
143 { "nokia,omap3-n950", hsmmc2_internal_input_clk, }, 144 { "nokia,omap3-n950", hsmmc2_internal_input_clk, },
144 { "isee,omap3-igep0020", omap3_igep0020_legacy_init, }, 145 { "isee,omap3-igep0020", omap3_igep0020_legacy_init, },
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 93b80e5da8d4..1f3770a8a728 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -120,7 +120,7 @@ static void omap3_save_secure_ram_context(void)
120 * will hang the system. 120 * will hang the system.
121 */ 121 */
122 pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON); 122 pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
123 ret = _omap_save_secure_sram((u32 *) 123 ret = _omap_save_secure_sram((u32 *)(unsigned long)
124 __pa(omap3_secure_ram_storage)); 124 __pa(omap3_secure_ram_storage));
125 pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state); 125 pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
126 /* Following is for error tracking, it should not happen */ 126 /* Following is for error tracking, it should not happen */
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index e233dfcbc186..93a2a6e4260f 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -128,7 +128,8 @@ skip_voltdm:
128 for (i = 0; i < pwrdm->banks; i++) 128 for (i = 0; i < pwrdm->banks; i++)
129 pwrdm->ret_mem_off_counter[i] = 0; 129 pwrdm->ret_mem_off_counter[i] = 0;
130 130
131 arch_pwrdm->pwrdm_wait_transition(pwrdm); 131 if (arch_pwrdm && arch_pwrdm->pwrdm_wait_transition)
132 arch_pwrdm->pwrdm_wait_transition(pwrdm);
132 pwrdm->state = pwrdm_read_pwrst(pwrdm); 133 pwrdm->state = pwrdm_read_pwrst(pwrdm);
133 pwrdm->state_counter[pwrdm->state] = 1; 134 pwrdm->state_counter[pwrdm->state] = 1;
134 135
diff --git a/arch/arm/mach-omap2/prm44xx_54xx.h b/arch/arm/mach-omap2/prm44xx_54xx.h
index 7a976065e138..8d95aa543ef5 100644
--- a/arch/arm/mach-omap2/prm44xx_54xx.h
+++ b/arch/arm/mach-omap2/prm44xx_54xx.h
@@ -43,7 +43,7 @@ extern void omap4_prm_vcvp_write(u32 val, u8 offset);
43extern u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset); 43extern u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset);
44 44
45#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \ 45#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
46 defined(CONFIG_SOC_DRA7XX) 46 defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM43XX)
47void omap44xx_prm_reconfigure_io_chain(void); 47void omap44xx_prm_reconfigure_io_chain(void);
48#else 48#else
49static inline void omap44xx_prm_reconfigure_io_chain(void) 49static inline void omap44xx_prm_reconfigure_io_chain(void)
diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c
index 0d5dd646f61f..263b15249b5b 100644
--- a/arch/arm/mach-pxa/reset.c
+++ b/arch/arm/mach-pxa/reset.c
@@ -13,6 +13,7 @@
13 13
14#include <mach/regs-ost.h> 14#include <mach/regs-ost.h>
15#include <mach/reset.h> 15#include <mach/reset.h>
16#include <mach/smemc.h>
16 17
17unsigned int reset_status; 18unsigned int reset_status;
18EXPORT_SYMBOL(reset_status); 19EXPORT_SYMBOL(reset_status);
@@ -81,6 +82,12 @@ static void do_hw_reset(void)
81 writel_relaxed(OSSR_M3, OSSR); 82 writel_relaxed(OSSR_M3, OSSR);
82 /* ... in 100 ms */ 83 /* ... in 100 ms */
83 writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3); 84 writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3);
85 /*
86 * SDRAM hangs on watchdog reset on Marvell PXA270 (erratum 71)
87 * we put SDRAM into self-refresh to prevent that
88 */
89 while (1)
90 writel_relaxed(MDREFR_SLFRSH, MDREFR);
84} 91}
85 92
86void pxa_restart(enum reboot_mode mode, const char *cmd) 93void pxa_restart(enum reboot_mode mode, const char *cmd)
@@ -104,4 +111,3 @@ void pxa_restart(enum reboot_mode mode, const char *cmd)
104 break; 111 break;
105 } 112 }
106} 113}
107
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 0206b915a6f6..ef5557b807ed 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -425,57 +425,57 @@ static struct platform_device tosa_power_device = {
425 * Tosa Keyboard 425 * Tosa Keyboard
426 */ 426 */
427static const uint32_t tosakbd_keymap[] = { 427static const uint32_t tosakbd_keymap[] = {
428 KEY(0, 2, KEY_W), 428 KEY(0, 1, KEY_W),
429 KEY(0, 6, KEY_K), 429 KEY(0, 5, KEY_K),
430 KEY(0, 7, KEY_BACKSPACE), 430 KEY(0, 6, KEY_BACKSPACE),
431 KEY(0, 8, KEY_P), 431 KEY(0, 7, KEY_P),
432 KEY(1, 1, KEY_Q), 432 KEY(1, 0, KEY_Q),
433 KEY(1, 2, KEY_E), 433 KEY(1, 1, KEY_E),
434 KEY(1, 3, KEY_T), 434 KEY(1, 2, KEY_T),
435 KEY(1, 4, KEY_Y), 435 KEY(1, 3, KEY_Y),
436 KEY(1, 6, KEY_O), 436 KEY(1, 5, KEY_O),
437 KEY(1, 7, KEY_I), 437 KEY(1, 6, KEY_I),
438 KEY(1, 8, KEY_COMMA), 438 KEY(1, 7, KEY_COMMA),
439 KEY(2, 1, KEY_A), 439 KEY(2, 0, KEY_A),
440 KEY(2, 2, KEY_D), 440 KEY(2, 1, KEY_D),
441 KEY(2, 3, KEY_G), 441 KEY(2, 2, KEY_G),
442 KEY(2, 4, KEY_U), 442 KEY(2, 3, KEY_U),
443 KEY(2, 6, KEY_L), 443 KEY(2, 5, KEY_L),
444 KEY(2, 7, KEY_ENTER), 444 KEY(2, 6, KEY_ENTER),
445 KEY(2, 8, KEY_DOT), 445 KEY(2, 7, KEY_DOT),
446 KEY(3, 1, KEY_Z), 446 KEY(3, 0, KEY_Z),
447 KEY(3, 2, KEY_C), 447 KEY(3, 1, KEY_C),
448 KEY(3, 3, KEY_V), 448 KEY(3, 2, KEY_V),
449 KEY(3, 4, KEY_J), 449 KEY(3, 3, KEY_J),
450 KEY(3, 5, TOSA_KEY_ADDRESSBOOK), 450 KEY(3, 4, TOSA_KEY_ADDRESSBOOK),
451 KEY(3, 6, TOSA_KEY_CANCEL), 451 KEY(3, 5, TOSA_KEY_CANCEL),
452 KEY(3, 7, TOSA_KEY_CENTER), 452 KEY(3, 6, TOSA_KEY_CENTER),
453 KEY(3, 8, TOSA_KEY_OK), 453 KEY(3, 7, TOSA_KEY_OK),
454 KEY(3, 9, KEY_LEFTSHIFT), 454 KEY(3, 8, KEY_LEFTSHIFT),
455 KEY(4, 1, KEY_S), 455 KEY(4, 0, KEY_S),
456 KEY(4, 2, KEY_R), 456 KEY(4, 1, KEY_R),
457 KEY(4, 3, KEY_B), 457 KEY(4, 2, KEY_B),
458 KEY(4, 4, KEY_N), 458 KEY(4, 3, KEY_N),
459 KEY(4, 5, TOSA_KEY_CALENDAR), 459 KEY(4, 4, TOSA_KEY_CALENDAR),
460 KEY(4, 6, TOSA_KEY_HOMEPAGE), 460 KEY(4, 5, TOSA_KEY_HOMEPAGE),
461 KEY(4, 7, KEY_LEFTCTRL), 461 KEY(4, 6, KEY_LEFTCTRL),
462 KEY(4, 8, TOSA_KEY_LIGHT), 462 KEY(4, 7, TOSA_KEY_LIGHT),
463 KEY(4, 10, KEY_RIGHTSHIFT), 463 KEY(4, 9, KEY_RIGHTSHIFT),
464 KEY(5, 1, KEY_TAB), 464 KEY(5, 0, KEY_TAB),
465 KEY(5, 2, KEY_SLASH), 465 KEY(5, 1, KEY_SLASH),
466 KEY(5, 3, KEY_H), 466 KEY(5, 2, KEY_H),
467 KEY(5, 4, KEY_M), 467 KEY(5, 3, KEY_M),
468 KEY(5, 5, TOSA_KEY_MENU), 468 KEY(5, 4, TOSA_KEY_MENU),
469 KEY(5, 7, KEY_UP), 469 KEY(5, 6, KEY_UP),
470 KEY(5, 11, TOSA_KEY_FN), 470 KEY(5, 10, TOSA_KEY_FN),
471 KEY(6, 1, KEY_X), 471 KEY(6, 0, KEY_X),
472 KEY(6, 2, KEY_F), 472 KEY(6, 1, KEY_F),
473 KEY(6, 3, KEY_SPACE), 473 KEY(6, 2, KEY_SPACE),
474 KEY(6, 4, KEY_APOSTROPHE), 474 KEY(6, 3, KEY_APOSTROPHE),
475 KEY(6, 5, TOSA_KEY_MAIL), 475 KEY(6, 4, TOSA_KEY_MAIL),
476 KEY(6, 6, KEY_LEFT), 476 KEY(6, 5, KEY_LEFT),
477 KEY(6, 7, KEY_DOWN), 477 KEY(6, 6, KEY_DOWN),
478 KEY(6, 8, KEY_RIGHT), 478 KEY(6, 7, KEY_RIGHT),
479}; 479};
480 480
481static struct matrix_keymap_data tosakbd_keymap_data = { 481static struct matrix_keymap_data tosakbd_keymap_data = {
diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig
index 037100a1563a..aee77f06f887 100644
--- a/arch/arm/mach-socfpga/Kconfig
+++ b/arch/arm/mach-socfpga/Kconfig
@@ -10,6 +10,7 @@ config ARCH_SOCFPGA
10 select GENERIC_CLOCKEVENTS 10 select GENERIC_CLOCKEVENTS
11 select GPIO_PL061 if GPIOLIB 11 select GPIO_PL061 if GPIOLIB
12 select HAVE_ARM_SCU 12 select HAVE_ARM_SCU
13 select HAVE_ARM_TWD if SMP
13 select HAVE_SMP 14 select HAVE_SMP
14 select MFD_SYSCON 15 select MFD_SYSCON
15 select SPARSE_IRQ 16 select SPARSE_IRQ
diff --git a/arch/arm/mach-tegra/fuse.c b/arch/arm/mach-tegra/fuse.c
index d4639c506622..3a9c1f1c219d 100644
--- a/arch/arm/mach-tegra/fuse.c
+++ b/arch/arm/mach-tegra/fuse.c
@@ -198,10 +198,12 @@ void __init tegra_init_fuse(void)
198 switch (tegra_chip_id) { 198 switch (tegra_chip_id) {
199 case TEGRA20: 199 case TEGRA20:
200 tegra20_fuse_init_randomness(); 200 tegra20_fuse_init_randomness();
201 break;
201 case TEGRA30: 202 case TEGRA30:
202 case TEGRA114: 203 case TEGRA114:
203 default: 204 default:
204 tegra30_fuse_init_randomness(); 205 tegra30_fuse_init_randomness();
206 break;
205 } 207 }
206 208
207 pr_info("Tegra Revision: %s SKU: %d CPU Process: %d Core Process: %d\n", 209 pr_info("Tegra Revision: %s SKU: %d CPU Process: %d Core Process: %d\n",
@@ -209,13 +211,3 @@ void __init tegra_init_fuse(void)
209 tegra_sku_id, tegra_cpu_process_id, 211 tegra_sku_id, tegra_cpu_process_id,
210 tegra_core_process_id); 212 tegra_core_process_id);
211} 213}
212
213unsigned long long tegra_chip_uid(void)
214{
215 unsigned long long lo, hi;
216
217 lo = tegra_fuse_readl(FUSE_UID_LOW);
218 hi = tegra_fuse_readl(FUSE_UID_HIGH);
219 return (hi << 32ull) | lo;
220}
221EXPORT_SYMBOL(tegra_chip_uid);
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 2e85c1e72535..12c7e5c03ea4 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -140,6 +140,10 @@ static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
140 /* Requires call-back bindings. */ 140 /* Requires call-back bindings. */
141 OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata), 141 OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata),
142 /* Requires DMA bindings. */ 142 /* Requires DMA bindings. */
143 OF_DEV_AUXDATA("arm,pl18x", 0x80126000, "sdi0", &mop500_sdi0_data),
144 OF_DEV_AUXDATA("arm,pl18x", 0x80118000, "sdi1", &mop500_sdi1_data),
145 OF_DEV_AUXDATA("arm,pl18x", 0x80005000, "sdi2", &mop500_sdi2_data),
146 OF_DEV_AUXDATA("arm,pl18x", 0x80114000, "sdi4", &mop500_sdi4_data),
143 OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80123000, 147 OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80123000,
144 "ux500-msp-i2s.0", &msp0_platform_data), 148 "ux500-msp-i2s.0", &msp0_platform_data),
145 OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80124000, 149 OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80124000,
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
index 033d34dcbd3f..c26ef5b92ca7 100644
--- a/arch/arm/mach-vexpress/spc.c
+++ b/arch/arm/mach-vexpress/spc.c
@@ -53,6 +53,11 @@
53#define A15_BX_ADDR0 0x68 53#define A15_BX_ADDR0 0x68
54#define A7_BX_ADDR0 0x78 54#define A7_BX_ADDR0 0x78
55 55
56/* SPC CPU/cluster reset statue */
57#define STANDBYWFI_STAT 0x3c
58#define STANDBYWFI_STAT_A15_CPU_MASK(cpu) (1 << (cpu))
59#define STANDBYWFI_STAT_A7_CPU_MASK(cpu) (1 << (3 + (cpu)))
60
56/* SPC system config interface registers */ 61/* SPC system config interface registers */
57#define SYSCFG_WDATA 0x70 62#define SYSCFG_WDATA 0x70
58#define SYSCFG_RDATA 0x74 63#define SYSCFG_RDATA 0x74
@@ -213,6 +218,41 @@ void ve_spc_powerdown(u32 cluster, bool enable)
213 writel_relaxed(enable, info->baseaddr + pwdrn_reg); 218 writel_relaxed(enable, info->baseaddr + pwdrn_reg);
214} 219}
215 220
221static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster)
222{
223 return cluster_is_a15(cluster) ?
224 STANDBYWFI_STAT_A15_CPU_MASK(cpu)
225 : STANDBYWFI_STAT_A7_CPU_MASK(cpu);
226}
227
228/**
229 * ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
230 *
231 * @cpu: mpidr[7:0] bitfield describing CPU affinity level within cluster
232 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
233 *
234 * @return: non-zero if and only if the specified CPU is in WFI
235 *
236 * Take care when interpreting the result of this function: a CPU might
237 * be in WFI temporarily due to idle, and is not necessarily safely
238 * parked.
239 */
240int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
241{
242 int ret;
243 u32 mask = standbywfi_cpu_mask(cpu, cluster);
244
245 if (cluster >= MAX_CLUSTERS)
246 return 1;
247
248 ret = readl_relaxed(info->baseaddr + STANDBYWFI_STAT);
249
250 pr_debug("%s: PCFGREG[0x%X] = 0x%08X, mask = 0x%X\n",
251 __func__, STANDBYWFI_STAT, ret, mask);
252
253 return ret & mask;
254}
255
216static int ve_spc_get_performance(int cluster, u32 *freq) 256static int ve_spc_get_performance(int cluster, u32 *freq)
217{ 257{
218 struct ve_spc_opp *opps = info->opps[cluster]; 258 struct ve_spc_opp *opps = info->opps[cluster];
diff --git a/arch/arm/mach-vexpress/spc.h b/arch/arm/mach-vexpress/spc.h
index dbd44c3720f9..793d065243b9 100644
--- a/arch/arm/mach-vexpress/spc.h
+++ b/arch/arm/mach-vexpress/spc.h
@@ -20,5 +20,6 @@ void ve_spc_global_wakeup_irq(bool set);
20void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set); 20void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set);
21void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr); 21void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr);
22void ve_spc_powerdown(u32 cluster, bool enable); 22void ve_spc_powerdown(u32 cluster, bool enable);
23int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster);
23 24
24#endif 25#endif
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index 05a364c5077a..29e7785a54bc 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -12,6 +12,7 @@
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 */ 13 */
14 14
15#include <linux/delay.h>
15#include <linux/init.h> 16#include <linux/init.h>
16#include <linux/io.h> 17#include <linux/io.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
@@ -32,11 +33,17 @@
32#include "spc.h" 33#include "spc.h"
33 34
34/* SCC conf registers */ 35/* SCC conf registers */
36#define RESET_CTRL 0x018
37#define RESET_A15_NCORERESET(cpu) (1 << (2 + (cpu)))
38#define RESET_A7_NCORERESET(cpu) (1 << (16 + (cpu)))
39
35#define A15_CONF 0x400 40#define A15_CONF 0x400
36#define A7_CONF 0x500 41#define A7_CONF 0x500
37#define SYS_INFO 0x700 42#define SYS_INFO 0x700
38#define SPC_BASE 0xb00 43#define SPC_BASE 0xb00
39 44
45static void __iomem *scc;
46
40/* 47/*
41 * We can't use regular spinlocks. In the switcher case, it is possible 48 * We can't use regular spinlocks. In the switcher case, it is possible
42 * for an outbound CPU to call power_down() after its inbound counterpart 49 * for an outbound CPU to call power_down() after its inbound counterpart
@@ -190,6 +197,55 @@ static void tc2_pm_power_down(void)
190 tc2_pm_down(0); 197 tc2_pm_down(0);
191} 198}
192 199
200static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
201{
202 u32 mask = cluster ?
203 RESET_A7_NCORERESET(cpu)
204 : RESET_A15_NCORERESET(cpu);
205
206 return !(readl_relaxed(scc + RESET_CTRL) & mask);
207}
208
209#define POLL_MSEC 10
210#define TIMEOUT_MSEC 1000
211
212static int tc2_pm_power_down_finish(unsigned int cpu, unsigned int cluster)
213{
214 unsigned tries;
215
216 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
217 BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
218
219 for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
220 /*
221 * Only examine the hardware state if the target CPU has
222 * caught up at least as far as tc2_pm_down():
223 */
224 if (ACCESS_ONCE(tc2_pm_use_count[cpu][cluster]) == 0) {
225 pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
226 __func__, cpu, cluster,
227 readl_relaxed(scc + RESET_CTRL));
228
229 /*
230 * We need the CPU to reach WFI, but the power
231 * controller may put the cluster in reset and
232 * power it off as soon as that happens, before
233 * we have a chance to see STANDBYWFI.
234 *
235 * So we need to check for both conditions:
236 */
237 if (tc2_core_in_reset(cpu, cluster) ||
238 ve_spc_cpu_in_wfi(cpu, cluster))
239 return 0; /* success: the CPU is halted */
240 }
241
242 /* Otherwise, wait and retry: */
243 msleep(POLL_MSEC);
244 }
245
246 return -ETIMEDOUT; /* timeout */
247}
248
193static void tc2_pm_suspend(u64 residency) 249static void tc2_pm_suspend(u64 residency)
194{ 250{
195 unsigned int mpidr, cpu, cluster; 251 unsigned int mpidr, cpu, cluster;
@@ -232,10 +288,11 @@ static void tc2_pm_powered_up(void)
232} 288}
233 289
234static const struct mcpm_platform_ops tc2_pm_power_ops = { 290static const struct mcpm_platform_ops tc2_pm_power_ops = {
235 .power_up = tc2_pm_power_up, 291 .power_up = tc2_pm_power_up,
236 .power_down = tc2_pm_power_down, 292 .power_down = tc2_pm_power_down,
237 .suspend = tc2_pm_suspend, 293 .power_down_finish = tc2_pm_power_down_finish,
238 .powered_up = tc2_pm_powered_up, 294 .suspend = tc2_pm_suspend,
295 .powered_up = tc2_pm_powered_up,
239}; 296};
240 297
241static bool __init tc2_pm_usage_count_init(void) 298static bool __init tc2_pm_usage_count_init(void)
@@ -269,7 +326,6 @@ static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
269static int __init tc2_pm_init(void) 326static int __init tc2_pm_init(void)
270{ 327{
271 int ret, irq; 328 int ret, irq;
272 void __iomem *scc;
273 u32 a15_cluster_id, a7_cluster_id, sys_info; 329 u32 a15_cluster_id, a7_cluster_id, sys_info;
274 struct device_node *np; 330 struct device_node *np;
275 331
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 79f8b39801a8..f61a5707823a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -9,6 +9,7 @@
9 * 9 *
10 * DMA uncached mapping support. 10 * DMA uncached mapping support.
11 */ 11 */
12#include <linux/bootmem.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/mm.h> 14#include <linux/mm.h>
14#include <linux/gfp.h> 15#include <linux/gfp.h>
@@ -157,6 +158,44 @@ struct dma_map_ops arm_coherent_dma_ops = {
157}; 158};
158EXPORT_SYMBOL(arm_coherent_dma_ops); 159EXPORT_SYMBOL(arm_coherent_dma_ops);
159 160
161static int __dma_supported(struct device *dev, u64 mask, bool warn)
162{
163 unsigned long max_dma_pfn;
164
165 /*
166 * If the mask allows for more memory than we can address,
167 * and we actually have that much memory, then we must
168 * indicate that DMA to this device is not supported.
169 */
170 if (sizeof(mask) != sizeof(dma_addr_t) &&
171 mask > (dma_addr_t)~0 &&
172 dma_to_pfn(dev, ~0) < max_pfn) {
173 if (warn) {
174 dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
175 mask);
176 dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
177 }
178 return 0;
179 }
180
181 max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
182
183 /*
184 * Translate the device's DMA mask to a PFN limit. This
185 * PFN number includes the page which we can DMA to.
186 */
187 if (dma_to_pfn(dev, mask) < max_dma_pfn) {
188 if (warn)
189 dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
190 mask,
191 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
192 max_dma_pfn + 1);
193 return 0;
194 }
195
196 return 1;
197}
198
160static u64 get_coherent_dma_mask(struct device *dev) 199static u64 get_coherent_dma_mask(struct device *dev)
161{ 200{
162 u64 mask = (u64)DMA_BIT_MASK(32); 201 u64 mask = (u64)DMA_BIT_MASK(32);
@@ -173,32 +212,8 @@ static u64 get_coherent_dma_mask(struct device *dev)
173 return 0; 212 return 0;
174 } 213 }
175 214
176 /* 215 if (!__dma_supported(dev, mask, true))
177 * If the mask allows for more memory than we can address,
178 * and we actually have that much memory, then fail the
179 * allocation.
180 */
181 if (sizeof(mask) != sizeof(dma_addr_t) &&
182 mask > (dma_addr_t)~0 &&
183 dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
184 dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
185 mask);
186 dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
187 return 0;
188 }
189
190 /*
191 * Now check that the mask, when translated to a PFN,
192 * fits within the allowable addresses which we can
193 * allocate.
194 */
195 if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
196 dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
197 mask,
198 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
199 arm_dma_pfn_limit + 1);
200 return 0; 216 return 0;
201 }
202 } 217 }
203 218
204 return mask; 219 return mask;
@@ -1027,28 +1042,7 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1027 */ 1042 */
1028int dma_supported(struct device *dev, u64 mask) 1043int dma_supported(struct device *dev, u64 mask)
1029{ 1044{
1030 unsigned long limit; 1045 return __dma_supported(dev, mask, false);
1031
1032 /*
1033 * If the mask allows for more memory than we can address,
1034 * and we actually have that much memory, then we must
1035 * indicate that DMA to this device is not supported.
1036 */
1037 if (sizeof(mask) != sizeof(dma_addr_t) &&
1038 mask > (dma_addr_t)~0 &&
1039 dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
1040 return 0;
1041
1042 /*
1043 * Translate the device's DMA mask to a PFN limit. This
1044 * PFN number includes the page which we can DMA to.
1045 */
1046 limit = dma_to_pfn(dev, mask);
1047
1048 if (limit < arm_dma_pfn_limit)
1049 return 0;
1050
1051 return 1;
1052} 1046}
1053EXPORT_SYMBOL(dma_supported); 1047EXPORT_SYMBOL(dma_supported);
1054 1048
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 3e8f106ee5fe..1f7b19a47060 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
229#ifdef CONFIG_ZONE_DMA 229#ifdef CONFIG_ZONE_DMA
230 if (mdesc->dma_zone_size) { 230 if (mdesc->dma_zone_size) {
231 arm_dma_zone_size = mdesc->dma_zone_size; 231 arm_dma_zone_size = mdesc->dma_zone_size;
232 arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1; 232 arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1;
233 } else 233 } else
234 arm_dma_limit = 0xffffffff; 234 arm_dma_limit = 0xffffffff;
235 arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT; 235 arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index d27158c38eb0..5e85ed371364 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -146,7 +146,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
146 146
147 info.flags = VM_UNMAPPED_AREA_TOPDOWN; 147 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
148 info.length = len; 148 info.length = len;
149 info.low_limit = PAGE_SIZE; 149 info.low_limit = FIRST_USER_ADDRESS;
150 info.high_limit = mm->mmap_base; 150 info.high_limit = mm->mmap_base;
151 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; 151 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
152 info.align_offset = pgoff << PAGE_SHIFT; 152 info.align_offset = pgoff << PAGE_SHIFT;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 78eeeca78f5a..580ef2de82d7 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -558,8 +558,8 @@ static void __init build_mem_type_table(void)
558 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB; 558 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
559 break; 559 break;
560 } 560 }
561 printk("Memory policy: ECC %sabled, Data cache %s\n", 561 pr_info("Memory policy: %sData cache %s\n",
562 ecc_mask ? "en" : "dis", cp->policy); 562 ecc_mask ? "ECC enabled, " : "", cp->policy);
563 563
564 for (i = 0; i < ARRAY_SIZE(mem_types); i++) { 564 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
565 struct mem_type *t = &mem_types[i]; 565 struct mem_type *t = &mem_types[i];
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 5c668b7a31f9..55764a7ef1f0 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -18,6 +18,7 @@
18#include <asm/mach/arch.h> 18#include <asm/mach/arch.h>
19#include <asm/cputype.h> 19#include <asm/cputype.h>
20#include <asm/mpu.h> 20#include <asm/mpu.h>
21#include <asm/procinfo.h>
21 22
22#include "mm.h" 23#include "mm.h"
23 24
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 0acb089d0f70..1046b373d1ae 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -87,7 +87,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
87 init_pud = pud_offset(init_pgd, 0); 87 init_pud = pud_offset(init_pgd, 0);
88 init_pmd = pmd_offset(init_pud, 0); 88 init_pmd = pmd_offset(init_pud, 0);
89 init_pte = pte_offset_map(init_pmd, 0); 89 init_pte = pte_offset_map(init_pmd, 0);
90 set_pte_ext(new_pte, *init_pte, 0); 90 set_pte_ext(new_pte + 0, init_pte[0], 0);
91 set_pte_ext(new_pte + 1, init_pte[1], 0);
91 pte_unmap(init_pte); 92 pte_unmap(init_pte);
92 pte_unmap(new_pte); 93 pte_unmap(new_pte);
93 } 94 }
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 60920f62fdf5..bd1781979a39 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -92,7 +92,7 @@ ENDPROC(cpu_v7_dcache_clean_area)
92 92
93/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ 93/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
94.globl cpu_v7_suspend_size 94.globl cpu_v7_suspend_size
95.equ cpu_v7_suspend_size, 4 * 8 95.equ cpu_v7_suspend_size, 4 * 9
96#ifdef CONFIG_ARM_CPU_SUSPEND 96#ifdef CONFIG_ARM_CPU_SUSPEND
97ENTRY(cpu_v7_do_suspend) 97ENTRY(cpu_v7_do_suspend)
98 stmfd sp!, {r4 - r10, lr} 98 stmfd sp!, {r4 - r10, lr}
@@ -101,13 +101,17 @@ ENTRY(cpu_v7_do_suspend)
101 stmia r0!, {r4 - r5} 101 stmia r0!, {r4 - r5}
102#ifdef CONFIG_MMU 102#ifdef CONFIG_MMU
103 mrc p15, 0, r6, c3, c0, 0 @ Domain ID 103 mrc p15, 0, r6, c3, c0, 0 @ Domain ID
104#ifdef CONFIG_ARM_LPAE
105 mrrc p15, 1, r5, r7, c2 @ TTB 1
106#else
104 mrc p15, 0, r7, c2, c0, 1 @ TTB 1 107 mrc p15, 0, r7, c2, c0, 1 @ TTB 1
108#endif
105 mrc p15, 0, r11, c2, c0, 2 @ TTB control register 109 mrc p15, 0, r11, c2, c0, 2 @ TTB control register
106#endif 110#endif
107 mrc p15, 0, r8, c1, c0, 0 @ Control register 111 mrc p15, 0, r8, c1, c0, 0 @ Control register
108 mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register 112 mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register
109 mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control 113 mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control
110 stmia r0, {r6 - r11} 114 stmia r0, {r5 - r11}
111 ldmfd sp!, {r4 - r10, pc} 115 ldmfd sp!, {r4 - r10, pc}
112ENDPROC(cpu_v7_do_suspend) 116ENDPROC(cpu_v7_do_suspend)
113 117
@@ -118,16 +122,19 @@ ENTRY(cpu_v7_do_resume)
118 ldmia r0!, {r4 - r5} 122 ldmia r0!, {r4 - r5}
119 mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID 123 mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID
120 mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID 124 mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID
121 ldmia r0, {r6 - r11} 125 ldmia r0, {r5 - r11}
122#ifdef CONFIG_MMU 126#ifdef CONFIG_MMU
123 mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs 127 mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs
124 mcr p15, 0, r6, c3, c0, 0 @ Domain ID 128 mcr p15, 0, r6, c3, c0, 0 @ Domain ID
125#ifndef CONFIG_ARM_LPAE 129#ifdef CONFIG_ARM_LPAE
130 mcrr p15, 0, r1, ip, c2 @ TTB 0
131 mcrr p15, 1, r5, r7, c2 @ TTB 1
132#else
126 ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) 133 ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP)
127 ALT_UP(orr r1, r1, #TTB_FLAGS_UP) 134 ALT_UP(orr r1, r1, #TTB_FLAGS_UP)
128#endif
129 mcr p15, 0, r1, c2, c0, 0 @ TTB 0 135 mcr p15, 0, r1, c2, c0, 0 @ TTB 0
130 mcr p15, 0, r7, c2, c0, 1 @ TTB 1 136 mcr p15, 0, r7, c2, c0, 1 @ TTB 1
137#endif
131 mcr p15, 0, r11, c2, c0, 2 @ TTB control register 138 mcr p15, 0, r11, c2, c0, 2 @ TTB control register
132 ldr r4, =PRRR @ PRRR 139 ldr r4, =PRRR @ PRRR
133 ldr r5, =NMRR @ NMRR 140 ldr r5, =NMRR @ NMRR
diff --git a/arch/arm/plat-omap/include/plat/dmtimer.h b/arch/arm/plat-omap/include/plat/dmtimer.h
index fb92abb91628..2861b155485a 100644
--- a/arch/arm/plat-omap/include/plat/dmtimer.h
+++ b/arch/arm/plat-omap/include/plat/dmtimer.h
@@ -336,8 +336,11 @@ static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer)
336 if (timer->posted) 336 if (timer->posted)
337 return; 337 return;
338 338
339 if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) 339 if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) {
340 timer->posted = OMAP_TIMER_NONPOSTED;
341 __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0);
340 return; 342 return;
343 }
341 344
342 __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 345 __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG,
343 OMAP_TIMER_CTRL_POSTED, 0); 346 OMAP_TIMER_CTRL_POSTED, 0);
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 23732cdff551..b31ee1b275b0 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -25,8 +25,9 @@ struct xen_p2m_entry {
25 struct rb_node rbnode_phys; 25 struct rb_node rbnode_phys;
26}; 26};
27 27
28rwlock_t p2m_lock; 28static rwlock_t p2m_lock;
29struct rb_root phys_to_mach = RB_ROOT; 29struct rb_root phys_to_mach = RB_ROOT;
30EXPORT_SYMBOL_GPL(phys_to_mach);
30static struct rb_root mach_to_phys = RB_ROOT; 31static struct rb_root mach_to_phys = RB_ROOT;
31 32
32static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new) 33static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
@@ -200,7 +201,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
200} 201}
201EXPORT_SYMBOL_GPL(__set_phys_to_machine); 202EXPORT_SYMBOL_GPL(__set_phys_to_machine);
202 203
203int p2m_init(void) 204static int p2m_init(void)
204{ 205{
205 rwlock_init(&p2m_lock); 206 rwlock_init(&p2m_lock);
206 return 0; 207 return 0;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 88c8b6c1341a..6d4dd22ee4b7 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -159,8 +159,7 @@ config NR_CPUS
159 range 2 32 159 range 2 32
160 depends on SMP 160 depends on SMP
161 # These have to remain sorted largest to smallest 161 # These have to remain sorted largest to smallest
162 default "8" if ARCH_XGENE 162 default "8"
163 default "4"
164 163
165config HOTPLUG_CPU 164config HOTPLUG_CPU
166 bool "Support for hot-pluggable CPUs" 165 bool "Support for hot-pluggable CPUs"
diff --git a/arch/arm64/boot/dts/foundation-v8.dts b/arch/arm64/boot/dts/foundation-v8.dts
index 84fcc5018284..519c4b2c0687 100644
--- a/arch/arm64/boot/dts/foundation-v8.dts
+++ b/arch/arm64/boot/dts/foundation-v8.dts
@@ -6,6 +6,8 @@
6 6
7/dts-v1/; 7/dts-v1/;
8 8
9/memreserve/ 0x80000000 0x00010000;
10
9/ { 11/ {
10 model = "Foundation-v8A"; 12 model = "Foundation-v8A";
11 compatible = "arm,foundation-aarch64", "arm,vexpress"; 13 compatible = "arm,foundation-aarch64", "arm,vexpress";
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 4cc813eddacb..572769727227 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -229,7 +229,7 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
229extern void __iounmap(volatile void __iomem *addr); 229extern void __iounmap(volatile void __iomem *addr);
230extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); 230extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
231 231
232#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY) 232#define PROT_DEFAULT (pgprot_default | PTE_DIRTY)
233#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) 233#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
234#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) 234#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
235#define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) 235#define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index aa11943b8502..b2fcfbc51ecc 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -56,6 +56,9 @@ static inline void arch_local_irq_disable(void)
56#define local_fiq_enable() asm("msr daifclr, #1" : : : "memory") 56#define local_fiq_enable() asm("msr daifclr, #1" : : : "memory")
57#define local_fiq_disable() asm("msr daifset, #1" : : : "memory") 57#define local_fiq_disable() asm("msr daifset, #1" : : : "memory")
58 58
59#define local_async_enable() asm("msr daifclr, #4" : : : "memory")
60#define local_async_disable() asm("msr daifset, #4" : : : "memory")
61
59/* 62/*
60 * Save the current interrupt enable state. 63 * Save the current interrupt enable state.
61 */ 64 */
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 755f86143320..b1d2e26c3c88 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -43,7 +43,7 @@
43 * Section 43 * Section
44 */ 44 */
45#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0) 45#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
46#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 2) 46#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 58)
47#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */ 47#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
48#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */ 48#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
49#define PMD_SECT_S (_AT(pmdval_t, 3) << 8) 49#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 17bd3af0a117..7f2b60affbb4 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -25,10 +25,11 @@
25 * Software defined PTE bits definition. 25 * Software defined PTE bits definition.
26 */ 26 */
27#define PTE_VALID (_AT(pteval_t, 1) << 0) 27#define PTE_VALID (_AT(pteval_t, 1) << 0)
28#define PTE_PROT_NONE (_AT(pteval_t, 1) << 2) /* only when !PTE_VALID */ 28#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
29#define PTE_FILE (_AT(pteval_t, 1) << 3) /* only when !pte_present() */
30#define PTE_DIRTY (_AT(pteval_t, 1) << 55) 29#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
31#define PTE_SPECIAL (_AT(pteval_t, 1) << 56) 30#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
31 /* bit 57 for PMD_SECT_SPLITTING */
32#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
32 33
33/* 34/*
34 * VMALLOC and SPARSEMEM_VMEMMAP ranges. 35 * VMALLOC and SPARSEMEM_VMEMMAP ranges.
@@ -254,7 +255,7 @@ static inline int has_transparent_hugepage(void)
254#define pgprot_noncached(prot) \ 255#define pgprot_noncached(prot) \
255 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE)) 256 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
256#define pgprot_writecombine(prot) \ 257#define pgprot_writecombine(prot) \
257 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE)) 258 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
258#define pgprot_dmacoherent(prot) \ 259#define pgprot_dmacoherent(prot) \
259 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC)) 260 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
260#define __HAVE_PHYS_MEM_ACCESS_PROT 261#define __HAVE_PHYS_MEM_ACCESS_PROT
@@ -357,18 +358,20 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
357 358
358/* 359/*
359 * Encode and decode a swap entry: 360 * Encode and decode a swap entry:
360 * bits 0, 2: present (must both be zero) 361 * bits 0-1: present (must be zero)
361 * bit 3: PTE_FILE 362 * bit 2: PTE_FILE
362 * bits 4-8: swap type 363 * bits 3-8: swap type
363 * bits 9-63: swap offset 364 * bits 9-57: swap offset
364 */ 365 */
365#define __SWP_TYPE_SHIFT 4 366#define __SWP_TYPE_SHIFT 3
366#define __SWP_TYPE_BITS 6 367#define __SWP_TYPE_BITS 6
368#define __SWP_OFFSET_BITS 49
367#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) 369#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
368#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) 370#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
371#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1)
369 372
370#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) 373#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
371#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT) 374#define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
372#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) }) 375#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
373 376
374#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 377#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
@@ -382,15 +385,15 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
382 385
383/* 386/*
384 * Encode and decode a file entry: 387 * Encode and decode a file entry:
385 * bits 0, 2: present (must both be zero) 388 * bits 0-1: present (must be zero)
386 * bit 3: PTE_FILE 389 * bit 2: PTE_FILE
387 * bits 4-63: file offset / PAGE_SIZE 390 * bits 3-57: file offset / PAGE_SIZE
388 */ 391 */
389#define pte_file(pte) (pte_val(pte) & PTE_FILE) 392#define pte_file(pte) (pte_val(pte) & PTE_FILE)
390#define pte_to_pgoff(x) (pte_val(x) >> 4) 393#define pte_to_pgoff(x) (pte_val(x) >> 3)
391#define pgoff_to_pte(x) __pte(((x) << 4) | PTE_FILE) 394#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE)
392 395
393#define PTE_FILE_MAX_BITS 60 396#define PTE_FILE_MAX_BITS 55
394 397
395extern int kern_addr_valid(unsigned long addr); 398extern int kern_addr_valid(unsigned long addr);
396 399
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 23a3c4791d86..720e70b66ffd 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -89,12 +89,6 @@ static inline struct thread_info *current_thread_info(void)
89#endif 89#endif
90 90
91/* 91/*
92 * We use bit 30 of the preempt_count to indicate that kernel
93 * preemption is occurring. See <asm/hardirq.h>.
94 */
95#define PREEMPT_ACTIVE 0x40000000
96
97/*
98 * thread information flags: 92 * thread information flags:
99 * TIF_SYSCALL_TRACE - syscall trace active 93 * TIF_SYSCALL_TRACE - syscall trace active
100 * TIF_SIGPENDING - signal pending 94 * TIF_SIGPENDING - signal pending
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 6a0a9b132d7a..4ae68579031d 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -248,7 +248,8 @@ static int brk_handler(unsigned long addr, unsigned int esr,
248int aarch32_break_handler(struct pt_regs *regs) 248int aarch32_break_handler(struct pt_regs *regs)
249{ 249{
250 siginfo_t info; 250 siginfo_t info;
251 unsigned int instr; 251 u32 arm_instr;
252 u16 thumb_instr;
252 bool bp = false; 253 bool bp = false;
253 void __user *pc = (void __user *)instruction_pointer(regs); 254 void __user *pc = (void __user *)instruction_pointer(regs);
254 255
@@ -257,18 +258,21 @@ int aarch32_break_handler(struct pt_regs *regs)
257 258
258 if (compat_thumb_mode(regs)) { 259 if (compat_thumb_mode(regs)) {
259 /* get 16-bit Thumb instruction */ 260 /* get 16-bit Thumb instruction */
260 get_user(instr, (u16 __user *)pc); 261 get_user(thumb_instr, (u16 __user *)pc);
261 if (instr == AARCH32_BREAK_THUMB2_LO) { 262 thumb_instr = le16_to_cpu(thumb_instr);
263 if (thumb_instr == AARCH32_BREAK_THUMB2_LO) {
262 /* get second half of 32-bit Thumb-2 instruction */ 264 /* get second half of 32-bit Thumb-2 instruction */
263 get_user(instr, (u16 __user *)(pc + 2)); 265 get_user(thumb_instr, (u16 __user *)(pc + 2));
264 bp = instr == AARCH32_BREAK_THUMB2_HI; 266 thumb_instr = le16_to_cpu(thumb_instr);
267 bp = thumb_instr == AARCH32_BREAK_THUMB2_HI;
265 } else { 268 } else {
266 bp = instr == AARCH32_BREAK_THUMB; 269 bp = thumb_instr == AARCH32_BREAK_THUMB;
267 } 270 }
268 } else { 271 } else {
269 /* 32-bit ARM instruction */ 272 /* 32-bit ARM instruction */
270 get_user(instr, (u32 __user *)pc); 273 get_user(arm_instr, (u32 __user *)pc);
271 bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM; 274 arm_instr = le32_to_cpu(arm_instr);
275 bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM;
272 } 276 }
273 277
274 if (!bp) 278 if (!bp)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index e1166145ca29..4d2c6f3f0c41 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -309,15 +309,12 @@ el1_irq:
309#ifdef CONFIG_TRACE_IRQFLAGS 309#ifdef CONFIG_TRACE_IRQFLAGS
310 bl trace_hardirqs_off 310 bl trace_hardirqs_off
311#endif 311#endif
312#ifdef CONFIG_PREEMPT 312
313 get_thread_info tsk
314 ldr w24, [tsk, #TI_PREEMPT] // get preempt count
315 add w0, w24, #1 // increment it
316 str w0, [tsk, #TI_PREEMPT]
317#endif
318 irq_handler 313 irq_handler
314
319#ifdef CONFIG_PREEMPT 315#ifdef CONFIG_PREEMPT
320 str w24, [tsk, #TI_PREEMPT] // restore preempt count 316 get_thread_info tsk
317 ldr w24, [tsk, #TI_PREEMPT] // restore preempt count
321 cbnz w24, 1f // preempt count != 0 318 cbnz w24, 1f // preempt count != 0
322 ldr x0, [tsk, #TI_FLAGS] // get flags 319 ldr x0, [tsk, #TI_FLAGS] // get flags
323 tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? 320 tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
@@ -507,22 +504,10 @@ el0_irq_naked:
507#ifdef CONFIG_TRACE_IRQFLAGS 504#ifdef CONFIG_TRACE_IRQFLAGS
508 bl trace_hardirqs_off 505 bl trace_hardirqs_off
509#endif 506#endif
510 get_thread_info tsk 507
511#ifdef CONFIG_PREEMPT
512 ldr w24, [tsk, #TI_PREEMPT] // get preempt count
513 add w23, w24, #1 // increment it
514 str w23, [tsk, #TI_PREEMPT]
515#endif
516 irq_handler 508 irq_handler
517#ifdef CONFIG_PREEMPT 509 get_thread_info tsk
518 ldr w0, [tsk, #TI_PREEMPT] 510
519 str w24, [tsk, #TI_PREEMPT]
520 cmp w0, w23
521 b.eq 1f
522 mov x1, #0
523 str x1, [x1] // BUG
5241:
525#endif
526#ifdef CONFIG_TRACE_IRQFLAGS 511#ifdef CONFIG_TRACE_IRQFLAGS
527 bl trace_hardirqs_on 512 bl trace_hardirqs_on
528#endif 513#endif
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 7009387348b7..c68cca5c3523 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -282,8 +282,9 @@ ENDPROC(secondary_holding_pen)
282 * be used where CPUs are brought online dynamically by the kernel. 282 * be used where CPUs are brought online dynamically by the kernel.
283 */ 283 */
284ENTRY(secondary_entry) 284ENTRY(secondary_entry)
285 bl __calc_phys_offset // x2=phys offset
286 bl el2_setup // Drop to EL1 285 bl el2_setup // Drop to EL1
286 bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
287 bl set_cpu_boot_mode_flag
287 b secondary_startup 288 b secondary_startup
288ENDPROC(secondary_entry) 289ENDPROC(secondary_entry)
289 290
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index fecdbf7de82e..6777a2192b83 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -636,28 +636,27 @@ static int compat_gpr_get(struct task_struct *target,
636 636
637 for (i = 0; i < num_regs; ++i) { 637 for (i = 0; i < num_regs; ++i) {
638 unsigned int idx = start + i; 638 unsigned int idx = start + i;
639 void *reg; 639 compat_ulong_t reg;
640 640
641 switch (idx) { 641 switch (idx) {
642 case 15: 642 case 15:
643 reg = (void *)&task_pt_regs(target)->pc; 643 reg = task_pt_regs(target)->pc;
644 break; 644 break;
645 case 16: 645 case 16:
646 reg = (void *)&task_pt_regs(target)->pstate; 646 reg = task_pt_regs(target)->pstate;
647 break; 647 break;
648 case 17: 648 case 17:
649 reg = (void *)&task_pt_regs(target)->orig_x0; 649 reg = task_pt_regs(target)->orig_x0;
650 break; 650 break;
651 default: 651 default:
652 reg = (void *)&task_pt_regs(target)->regs[idx]; 652 reg = task_pt_regs(target)->regs[idx];
653 } 653 }
654 654
655 ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t)); 655 ret = copy_to_user(ubuf, &reg, sizeof(reg));
656
657 if (ret) 656 if (ret)
658 break; 657 break;
659 else 658
660 ubuf += sizeof(compat_ulong_t); 659 ubuf += sizeof(reg);
661 } 660 }
662 661
663 return ret; 662 return ret;
@@ -685,28 +684,28 @@ static int compat_gpr_set(struct task_struct *target,
685 684
686 for (i = 0; i < num_regs; ++i) { 685 for (i = 0; i < num_regs; ++i) {
687 unsigned int idx = start + i; 686 unsigned int idx = start + i;
688 void *reg; 687 compat_ulong_t reg;
688
689 ret = copy_from_user(&reg, ubuf, sizeof(reg));
690 if (ret)
691 return ret;
692
693 ubuf += sizeof(reg);
689 694
690 switch (idx) { 695 switch (idx) {
691 case 15: 696 case 15:
692 reg = (void *)&newregs.pc; 697 newregs.pc = reg;
693 break; 698 break;
694 case 16: 699 case 16:
695 reg = (void *)&newregs.pstate; 700 newregs.pstate = reg;
696 break; 701 break;
697 case 17: 702 case 17:
698 reg = (void *)&newregs.orig_x0; 703 newregs.orig_x0 = reg;
699 break; 704 break;
700 default: 705 default:
701 reg = (void *)&newregs.regs[idx]; 706 newregs.regs[idx] = reg;
702 } 707 }
703 708
704 ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t));
705
706 if (ret)
707 goto out;
708 else
709 ubuf += sizeof(compat_ulong_t);
710 } 709 }
711 710
712 if (valid_user_regs(&newregs.user_regs)) 711 if (valid_user_regs(&newregs.user_regs))
@@ -714,7 +713,6 @@ static int compat_gpr_set(struct task_struct *target,
714 else 713 else
715 ret = -EINVAL; 714 ret = -EINVAL;
716 715
717out:
718 return ret; 716 return ret;
719} 717}
720 718
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 0bc5e4cbc017..bd9bbd0e44ed 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -205,6 +205,11 @@ u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
205 205
206void __init setup_arch(char **cmdline_p) 206void __init setup_arch(char **cmdline_p)
207{ 207{
208 /*
209 * Unmask asynchronous aborts early to catch possible system errors.
210 */
211 local_async_enable();
212
208 setup_processor(); 213 setup_processor();
209 214
210 setup_machine_fdt(__fdt_pointer); 215 setup_machine_fdt(__fdt_pointer);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index a5aeefab03c3..a0c2ca602cf8 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -160,6 +160,7 @@ asmlinkage void secondary_start_kernel(void)
160 160
161 local_irq_enable(); 161 local_irq_enable();
162 local_fiq_enable(); 162 local_fiq_enable();
163 local_async_enable();
163 164
164 /* 165 /*
165 * OK, it's off to the idle thread for us 166 * OK, it's off to the idle thread for us
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 421b99fd635d..0f7fec52c7f8 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -111,12 +111,12 @@ ENTRY(__cpu_setup)
111 bl __flush_dcache_all 111 bl __flush_dcache_all
112 mov lr, x28 112 mov lr, x28
113 ic iallu // I+BTB cache invalidate 113 ic iallu // I+BTB cache invalidate
114 tlbi vmalle1is // invalidate I + D TLBs
114 dsb sy 115 dsb sy
115 116
116 mov x0, #3 << 20 117 mov x0, #3 << 20
117 msr cpacr_el1, x0 // Enable FP/ASIMD 118 msr cpacr_el1, x0 // Enable FP/ASIMD
118 msr mdscr_el1, xzr // Reset mdscr_el1 119 msr mdscr_el1, xzr // Reset mdscr_el1
119 tlbi vmalle1is // invalidate I + D TLBs
120 /* 120 /*
121 * Memory region attributes for LPAE: 121 * Memory region attributes for LPAE:
122 * 122 *
diff --git a/arch/avr32/boards/favr-32/setup.c b/arch/avr32/boards/favr-32/setup.c
index 7b1f2cd85400..1f121497b517 100644
--- a/arch/avr32/boards/favr-32/setup.c
+++ b/arch/avr32/boards/favr-32/setup.c
@@ -298,8 +298,10 @@ static int __init set_abdac_rate(struct platform_device *pdev)
298 */ 298 */
299 retval = clk_round_rate(pll1, 299 retval = clk_round_rate(pll1,
300 CONFIG_BOARD_FAVR32_ABDAC_RATE * 256 * 16); 300 CONFIG_BOARD_FAVR32_ABDAC_RATE * 256 * 16);
301 if (retval < 0) 301 if (retval <= 0) {
302 retval = -EINVAL;
302 goto out_abdac; 303 goto out_abdac;
304 }
303 305
304 retval = clk_set_rate(pll1, retval); 306 retval = clk_set_rate(pll1, retval);
305 if (retval != 0) 307 if (retval != 0)
diff --git a/arch/avr32/boot/u-boot/head.S b/arch/avr32/boot/u-boot/head.S
index 4488fa27fe94..2ffc298f061b 100644
--- a/arch/avr32/boot/u-boot/head.S
+++ b/arch/avr32/boot/u-boot/head.S
@@ -8,6 +8,8 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#include <asm/setup.h> 10#include <asm/setup.h>
11#include <asm/thread_info.h>
12#include <asm/sysreg.h>
11 13
12 /* 14 /*
13 * The kernel is loaded where we want it to be and all caches 15 * The kernel is loaded where we want it to be and all caches
@@ -20,11 +22,6 @@
20 .section .init.text,"ax" 22 .section .init.text,"ax"
21 .global _start 23 .global _start
22_start: 24_start:
23 /* Check if the boot loader actually provided a tag table */
24 lddpc r0, magic_number
25 cp.w r12, r0
26 brne no_tag_table
27
28 /* Initialize .bss */ 25 /* Initialize .bss */
29 lddpc r2, bss_start_addr 26 lddpc r2, bss_start_addr
30 lddpc r3, end_addr 27 lddpc r3, end_addr
@@ -34,6 +31,25 @@ _start:
34 cp r2, r3 31 cp r2, r3
35 brlo 1b 32 brlo 1b
36 33
34 /* Initialize status register */
35 lddpc r0, init_sr
36 mtsr SYSREG_SR, r0
37
38 /* Set initial stack pointer */
39 lddpc sp, stack_addr
40 sub sp, -THREAD_SIZE
41
42#ifdef CONFIG_FRAME_POINTER
43 /* Mark last stack frame */
44 mov lr, 0
45 mov r7, 0
46#endif
47
48 /* Check if the boot loader actually provided a tag table */
49 lddpc r0, magic_number
50 cp.w r12, r0
51 brne no_tag_table
52
37 /* 53 /*
38 * Save the tag table address for later use. This must be done 54 * Save the tag table address for later use. This must be done
39 * _after_ .bss has been initialized... 55 * _after_ .bss has been initialized...
@@ -53,8 +69,15 @@ bss_start_addr:
53 .long __bss_start 69 .long __bss_start
54end_addr: 70end_addr:
55 .long _end 71 .long _end
72init_sr:
73 .long 0x007f0000 /* Supervisor mode, everything masked */
74stack_addr:
75 .long init_thread_union
76panic_addr:
77 .long panic
56 78
57no_tag_table: 79no_tag_table:
58 sub r12, pc, (. - 2f) 80 sub r12, pc, (. - 2f)
59 bral panic 81 /* branch to panic() which can be far away with that construct */
82 lddpc pc, panic_addr
602: .asciz "Boot loader didn't provide correct magic number\n" 832: .asciz "Boot loader didn't provide correct magic number\n"
diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig
index d5aff36ade92..4733e38e7ae6 100644
--- a/arch/avr32/configs/atngw100_defconfig
+++ b/arch/avr32/configs/atngw100_defconfig
@@ -59,7 +59,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
59# CONFIG_PREVENT_FIRMWARE_BUILD is not set 59# CONFIG_PREVENT_FIRMWARE_BUILD is not set
60# CONFIG_FW_LOADER is not set 60# CONFIG_FW_LOADER is not set
61CONFIG_MTD=y 61CONFIG_MTD=y
62CONFIG_MTD_PARTITIONS=y
63CONFIG_MTD_CMDLINE_PARTS=y 62CONFIG_MTD_CMDLINE_PARTS=y
64CONFIG_MTD_CHAR=y 63CONFIG_MTD_CHAR=y
65CONFIG_MTD_BLOCK=y 64CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig
index 4abcf435d599..1be0ee31bd91 100644
--- a/arch/avr32/configs/atngw100_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd100_defconfig
@@ -61,7 +61,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
61# CONFIG_PREVENT_FIRMWARE_BUILD is not set 61# CONFIG_PREVENT_FIRMWARE_BUILD is not set
62# CONFIG_FW_LOADER is not set 62# CONFIG_FW_LOADER is not set
63CONFIG_MTD=y 63CONFIG_MTD=y
64CONFIG_MTD_PARTITIONS=y
65CONFIG_MTD_CMDLINE_PARTS=y 64CONFIG_MTD_CMDLINE_PARTS=y
66CONFIG_MTD_CHAR=y 65CONFIG_MTD_CHAR=y
67CONFIG_MTD_BLOCK=y 66CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig
index 18f3fa0470ff..796e536f7bc4 100644
--- a/arch/avr32/configs/atngw100_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd101_defconfig
@@ -60,7 +60,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
60# CONFIG_PREVENT_FIRMWARE_BUILD is not set 60# CONFIG_PREVENT_FIRMWARE_BUILD is not set
61# CONFIG_FW_LOADER is not set 61# CONFIG_FW_LOADER is not set
62CONFIG_MTD=y 62CONFIG_MTD=y
63CONFIG_MTD_PARTITIONS=y
64CONFIG_MTD_CMDLINE_PARTS=y 63CONFIG_MTD_CMDLINE_PARTS=y
65CONFIG_MTD_CHAR=y 64CONFIG_MTD_CHAR=y
66CONFIG_MTD_BLOCK=y 65CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100_mrmt_defconfig b/arch/avr32/configs/atngw100_mrmt_defconfig
index 06e389cfcd12..9a57da44eb6f 100644
--- a/arch/avr32/configs/atngw100_mrmt_defconfig
+++ b/arch/avr32/configs/atngw100_mrmt_defconfig
@@ -48,7 +48,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
48# CONFIG_PREVENT_FIRMWARE_BUILD is not set 48# CONFIG_PREVENT_FIRMWARE_BUILD is not set
49# CONFIG_FW_LOADER is not set 49# CONFIG_FW_LOADER is not set
50CONFIG_MTD=y 50CONFIG_MTD=y
51CONFIG_MTD_PARTITIONS=y
52CONFIG_MTD_CMDLINE_PARTS=y 51CONFIG_MTD_CMDLINE_PARTS=y
53CONFIG_MTD_CHAR=y 52CONFIG_MTD_CHAR=y
54CONFIG_MTD_BLOCK=y 53CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig
index 2518a1368d7c..97fe1b399b06 100644
--- a/arch/avr32/configs/atngw100mkii_defconfig
+++ b/arch/avr32/configs/atngw100mkii_defconfig
@@ -59,7 +59,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
59# CONFIG_PREVENT_FIRMWARE_BUILD is not set 59# CONFIG_PREVENT_FIRMWARE_BUILD is not set
60# CONFIG_FW_LOADER is not set 60# CONFIG_FW_LOADER is not set
61CONFIG_MTD=y 61CONFIG_MTD=y
62CONFIG_MTD_PARTITIONS=y
63CONFIG_MTD_CMDLINE_PARTS=y 62CONFIG_MTD_CMDLINE_PARTS=y
64CONFIG_MTD_CHAR=y 63CONFIG_MTD_CHAR=y
65CONFIG_MTD_BLOCK=y 64CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
index 245ef6bd0fa6..a176d24467e9 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
@@ -62,7 +62,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
62# CONFIG_PREVENT_FIRMWARE_BUILD is not set 62# CONFIG_PREVENT_FIRMWARE_BUILD is not set
63# CONFIG_FW_LOADER is not set 63# CONFIG_FW_LOADER is not set
64CONFIG_MTD=y 64CONFIG_MTD=y
65CONFIG_MTD_PARTITIONS=y
66CONFIG_MTD_CMDLINE_PARTS=y 65CONFIG_MTD_CMDLINE_PARTS=y
67CONFIG_MTD_CHAR=y 66CONFIG_MTD_CHAR=y
68CONFIG_MTD_BLOCK=y 67CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
index fa6cbac6e418..d1bf6dcfc47d 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
@@ -61,7 +61,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
61# CONFIG_PREVENT_FIRMWARE_BUILD is not set 61# CONFIG_PREVENT_FIRMWARE_BUILD is not set
62# CONFIG_FW_LOADER is not set 62# CONFIG_FW_LOADER is not set
63CONFIG_MTD=y 63CONFIG_MTD=y
64CONFIG_MTD_PARTITIONS=y
65CONFIG_MTD_CMDLINE_PARTS=y 64CONFIG_MTD_CMDLINE_PARTS=y
66CONFIG_MTD_CHAR=y 65CONFIG_MTD_CHAR=y
67CONFIG_MTD_BLOCK=y 66CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig
index bbd5131021a5..2813dd2b9138 100644
--- a/arch/avr32/configs/atstk1002_defconfig
+++ b/arch/avr32/configs/atstk1002_defconfig
@@ -53,7 +53,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
53# CONFIG_PREVENT_FIRMWARE_BUILD is not set 53# CONFIG_PREVENT_FIRMWARE_BUILD is not set
54# CONFIG_FW_LOADER is not set 54# CONFIG_FW_LOADER is not set
55CONFIG_MTD=y 55CONFIG_MTD=y
56CONFIG_MTD_PARTITIONS=y
57CONFIG_MTD_CMDLINE_PARTS=y 56CONFIG_MTD_CMDLINE_PARTS=y
58CONFIG_MTD_CHAR=y 57CONFIG_MTD_CHAR=y
59CONFIG_MTD_BLOCK=y 58CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atstk1003_defconfig b/arch/avr32/configs/atstk1003_defconfig
index c1cd726f9012..f8ff3a3baad4 100644
--- a/arch/avr32/configs/atstk1003_defconfig
+++ b/arch/avr32/configs/atstk1003_defconfig
@@ -42,7 +42,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
42# CONFIG_PREVENT_FIRMWARE_BUILD is not set 42# CONFIG_PREVENT_FIRMWARE_BUILD is not set
43# CONFIG_FW_LOADER is not set 43# CONFIG_FW_LOADER is not set
44CONFIG_MTD=y 44CONFIG_MTD=y
45CONFIG_MTD_PARTITIONS=y
46CONFIG_MTD_CMDLINE_PARTS=y 45CONFIG_MTD_CMDLINE_PARTS=y
47CONFIG_MTD_CHAR=y 46CONFIG_MTD_CHAR=y
48CONFIG_MTD_BLOCK=y 47CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atstk1004_defconfig b/arch/avr32/configs/atstk1004_defconfig
index 754ae56b2767..992228e54e38 100644
--- a/arch/avr32/configs/atstk1004_defconfig
+++ b/arch/avr32/configs/atstk1004_defconfig
@@ -42,7 +42,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
42# CONFIG_PREVENT_FIRMWARE_BUILD is not set 42# CONFIG_PREVENT_FIRMWARE_BUILD is not set
43# CONFIG_FW_LOADER is not set 43# CONFIG_FW_LOADER is not set
44CONFIG_MTD=y 44CONFIG_MTD=y
45CONFIG_MTD_PARTITIONS=y
46CONFIG_MTD_CMDLINE_PARTS=y 45CONFIG_MTD_CMDLINE_PARTS=y
47CONFIG_MTD_CHAR=y 46CONFIG_MTD_CHAR=y
48CONFIG_MTD_BLOCK=y 47CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig
index 58589d8cc0ac..b8e698b0d1fa 100644
--- a/arch/avr32/configs/atstk1006_defconfig
+++ b/arch/avr32/configs/atstk1006_defconfig
@@ -54,7 +54,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
54# CONFIG_PREVENT_FIRMWARE_BUILD is not set 54# CONFIG_PREVENT_FIRMWARE_BUILD is not set
55# CONFIG_FW_LOADER is not set 55# CONFIG_FW_LOADER is not set
56CONFIG_MTD=y 56CONFIG_MTD=y
57CONFIG_MTD_PARTITIONS=y
58CONFIG_MTD_CMDLINE_PARTS=y 57CONFIG_MTD_CMDLINE_PARTS=y
59CONFIG_MTD_CHAR=y 58CONFIG_MTD_CHAR=y
60CONFIG_MTD_BLOCK=y 59CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig
index c90fbf6d35bc..07bed3f7eb5e 100644
--- a/arch/avr32/configs/favr-32_defconfig
+++ b/arch/avr32/configs/favr-32_defconfig
@@ -58,7 +58,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
58# CONFIG_PREVENT_FIRMWARE_BUILD is not set 58# CONFIG_PREVENT_FIRMWARE_BUILD is not set
59# CONFIG_FW_LOADER is not set 59# CONFIG_FW_LOADER is not set
60CONFIG_MTD=y 60CONFIG_MTD=y
61CONFIG_MTD_PARTITIONS=y
62CONFIG_MTD_CMDLINE_PARTS=y 61CONFIG_MTD_CMDLINE_PARTS=y
63CONFIG_MTD_CHAR=y 62CONFIG_MTD_CHAR=y
64CONFIG_MTD_BLOCK=y 63CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/hammerhead_defconfig b/arch/avr32/configs/hammerhead_defconfig
index ba7c31e269cb..18db853386c8 100644
--- a/arch/avr32/configs/hammerhead_defconfig
+++ b/arch/avr32/configs/hammerhead_defconfig
@@ -58,7 +58,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
58# CONFIG_PREVENT_FIRMWARE_BUILD is not set 58# CONFIG_PREVENT_FIRMWARE_BUILD is not set
59# CONFIG_FW_LOADER is not set 59# CONFIG_FW_LOADER is not set
60CONFIG_MTD=y 60CONFIG_MTD=y
61CONFIG_MTD_PARTITIONS=y
62CONFIG_MTD_CMDLINE_PARTS=y 61CONFIG_MTD_CMDLINE_PARTS=y
63CONFIG_MTD_CHAR=y 62CONFIG_MTD_CHAR=y
64CONFIG_MTD_BLOCK=y 63CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/merisc_defconfig b/arch/avr32/configs/merisc_defconfig
index 65de4431108c..91df6b2986be 100644
--- a/arch/avr32/configs/merisc_defconfig
+++ b/arch/avr32/configs/merisc_defconfig
@@ -46,7 +46,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
46# CONFIG_FW_LOADER is not set 46# CONFIG_FW_LOADER is not set
47CONFIG_MTD=y 47CONFIG_MTD=y
48CONFIG_MTD_CONCAT=y 48CONFIG_MTD_CONCAT=y
49CONFIG_MTD_PARTITIONS=y
50CONFIG_MTD_CHAR=y 49CONFIG_MTD_CHAR=y
51CONFIG_MTD_BLOCK=y 50CONFIG_MTD_BLOCK=y
52CONFIG_MTD_CFI=y 51CONFIG_MTD_CFI=y
diff --git a/arch/avr32/configs/mimc200_defconfig b/arch/avr32/configs/mimc200_defconfig
index 0a8bfdc420e0..d630e089dd32 100644
--- a/arch/avr32/configs/mimc200_defconfig
+++ b/arch/avr32/configs/mimc200_defconfig
@@ -49,7 +49,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
49# CONFIG_PREVENT_FIRMWARE_BUILD is not set 49# CONFIG_PREVENT_FIRMWARE_BUILD is not set
50# CONFIG_FW_LOADER is not set 50# CONFIG_FW_LOADER is not set
51CONFIG_MTD=y 51CONFIG_MTD=y
52CONFIG_MTD_PARTITIONS=y
53CONFIG_MTD_CMDLINE_PARTS=y 52CONFIG_MTD_CMDLINE_PARTS=y
54CONFIG_MTD_CHAR=y 53CONFIG_MTD_CHAR=y
55CONFIG_MTD_BLOCK=y 54CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/include/asm/kprobes.h b/arch/avr32/include/asm/kprobes.h
index 996cb656474e..45f563ed73fd 100644
--- a/arch/avr32/include/asm/kprobes.h
+++ b/arch/avr32/include/asm/kprobes.h
@@ -16,6 +16,7 @@
16typedef u16 kprobe_opcode_t; 16typedef u16 kprobe_opcode_t;
17#define BREAKPOINT_INSTRUCTION 0xd673 /* breakpoint */ 17#define BREAKPOINT_INSTRUCTION 0xd673 /* breakpoint */
18#define MAX_INSN_SIZE 2 18#define MAX_INSN_SIZE 2
19#define MAX_STACK_SIZE 64 /* 32 would probably be OK */
19 20
20#define kretprobe_blacklist_size 0 21#define kretprobe_blacklist_size 0
21 22
@@ -26,6 +27,19 @@ struct arch_specific_insn {
26 kprobe_opcode_t insn[MAX_INSN_SIZE]; 27 kprobe_opcode_t insn[MAX_INSN_SIZE];
27}; 28};
28 29
30struct prev_kprobe {
31 struct kprobe *kp;
32 unsigned int status;
33};
34
35/* per-cpu kprobe control block */
36struct kprobe_ctlblk {
37 unsigned int kprobe_status;
38 struct prev_kprobe prev_kprobe;
39 struct pt_regs jprobe_saved_regs;
40 char jprobes_stack[MAX_STACK_SIZE];
41};
42
29extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); 43extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
30extern int kprobe_exceptions_notify(struct notifier_block *self, 44extern int kprobe_exceptions_notify(struct notifier_block *self,
31 unsigned long val, void *data); 45 unsigned long val, void *data);
diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h
index 6dc62e1f94c7..a978f3fe7c25 100644
--- a/arch/avr32/include/asm/thread_info.h
+++ b/arch/avr32/include/asm/thread_info.h
@@ -66,8 +66,6 @@ static inline struct thread_info *current_thread_info(void)
66 66
67#endif /* !__ASSEMBLY__ */ 67#endif /* !__ASSEMBLY__ */
68 68
69#define PREEMPT_ACTIVE 0x40000000
70
71/* 69/*
72 * Thread information flags 70 * Thread information flags
73 * - these are process state flags that various assembly files may need to access 71 * - these are process state flags that various assembly files may need to access
diff --git a/arch/avr32/include/uapi/asm/Kbuild b/arch/avr32/include/uapi/asm/Kbuild
index 3b85eaddf525..08d8a3d76ea8 100644
--- a/arch/avr32/include/uapi/asm/Kbuild
+++ b/arch/avr32/include/uapi/asm/Kbuild
@@ -2,35 +2,35 @@
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4header-y += auxvec.h 4header-y += auxvec.h
5header-y += bitsperlong.h
6header-y += byteorder.h 5header-y += byteorder.h
7header-y += cachectl.h 6header-y += cachectl.h
8header-y += errno.h
9header-y += fcntl.h
10header-y += ioctl.h
11header-y += ioctls.h
12header-y += ipcbuf.h
13header-y += kvm_para.h
14header-y += mman.h
15header-y += msgbuf.h 7header-y += msgbuf.h
16header-y += param.h 8header-y += param.h
17header-y += poll.h
18header-y += posix_types.h 9header-y += posix_types.h
19header-y += ptrace.h 10header-y += ptrace.h
20header-y += resource.h
21header-y += sembuf.h 11header-y += sembuf.h
22header-y += setup.h 12header-y += setup.h
23header-y += shmbuf.h 13header-y += shmbuf.h
24header-y += sigcontext.h 14header-y += sigcontext.h
25header-y += siginfo.h
26header-y += signal.h 15header-y += signal.h
27header-y += socket.h 16header-y += socket.h
28header-y += sockios.h 17header-y += sockios.h
29header-y += stat.h 18header-y += stat.h
30header-y += statfs.h
31header-y += swab.h 19header-y += swab.h
32header-y += termbits.h 20header-y += termbits.h
33header-y += termios.h 21header-y += termios.h
34header-y += types.h 22header-y += types.h
35header-y += unistd.h 23header-y += unistd.h
24generic-y += bitsperlong.h
25generic-y += errno.h
26generic-y += fcntl.h
27generic-y += ioctl.h
28generic-y += ioctls.h
29generic-y += ipcbuf.h
30generic-y += kvm_para.h
31generic-y += mman.h
36generic-y += param.h 32generic-y += param.h
33generic-y += poll.h
34generic-y += resource.h
35generic-y += siginfo.h
36generic-y += statfs.h
diff --git a/arch/avr32/include/uapi/asm/auxvec.h b/arch/avr32/include/uapi/asm/auxvec.h
index d5dd435bf8f4..4f02da3ffefa 100644
--- a/arch/avr32/include/uapi/asm/auxvec.h
+++ b/arch/avr32/include/uapi/asm/auxvec.h
@@ -1,4 +1,4 @@
1#ifndef __ASM_AVR32_AUXVEC_H 1#ifndef _UAPI__ASM_AVR32_AUXVEC_H
2#define __ASM_AVR32_AUXVEC_H 2#define _UAPI__ASM_AVR32_AUXVEC_H
3 3
4#endif /* __ASM_AVR32_AUXVEC_H */ 4#endif /* _UAPI__ASM_AVR32_AUXVEC_H */
diff --git a/arch/avr32/include/uapi/asm/bitsperlong.h b/arch/avr32/include/uapi/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0c13b2..000000000000
--- a/arch/avr32/include/uapi/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/bitsperlong.h>
diff --git a/arch/avr32/include/uapi/asm/byteorder.h b/arch/avr32/include/uapi/asm/byteorder.h
index 50abc21619a8..71242f0d39c6 100644
--- a/arch/avr32/include/uapi/asm/byteorder.h
+++ b/arch/avr32/include/uapi/asm/byteorder.h
@@ -1,9 +1,9 @@
1/* 1/*
2 * AVR32 endian-conversion functions. 2 * AVR32 endian-conversion functions.
3 */ 3 */
4#ifndef __ASM_AVR32_BYTEORDER_H 4#ifndef _UAPI__ASM_AVR32_BYTEORDER_H
5#define __ASM_AVR32_BYTEORDER_H 5#define _UAPI__ASM_AVR32_BYTEORDER_H
6 6
7#include <linux/byteorder/big_endian.h> 7#include <linux/byteorder/big_endian.h>
8 8
9#endif /* __ASM_AVR32_BYTEORDER_H */ 9#endif /* _UAPI__ASM_AVR32_BYTEORDER_H */
diff --git a/arch/avr32/include/uapi/asm/cachectl.h b/arch/avr32/include/uapi/asm/cachectl.h
index 4faf1ce60061..573a9584dd57 100644
--- a/arch/avr32/include/uapi/asm/cachectl.h
+++ b/arch/avr32/include/uapi/asm/cachectl.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_AVR32_CACHECTL_H 1#ifndef _UAPI__ASM_AVR32_CACHECTL_H
2#define __ASM_AVR32_CACHECTL_H 2#define _UAPI__ASM_AVR32_CACHECTL_H
3 3
4/* 4/*
5 * Operations that can be performed through the cacheflush system call 5 * Operations that can be performed through the cacheflush system call
@@ -8,4 +8,4 @@
8/* Clean the data cache, then invalidate the icache */ 8/* Clean the data cache, then invalidate the icache */
9#define CACHE_IFLUSH 0 9#define CACHE_IFLUSH 0
10 10
11#endif /* __ASM_AVR32_CACHECTL_H */ 11#endif /* _UAPI__ASM_AVR32_CACHECTL_H */
diff --git a/arch/avr32/include/uapi/asm/errno.h b/arch/avr32/include/uapi/asm/errno.h
deleted file mode 100644
index 558a7249f06d..000000000000
--- a/arch/avr32/include/uapi/asm/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_ERRNO_H
2#define __ASM_AVR32_ERRNO_H
3
4#include <asm-generic/errno.h>
5
6#endif /* __ASM_AVR32_ERRNO_H */
diff --git a/arch/avr32/include/uapi/asm/fcntl.h b/arch/avr32/include/uapi/asm/fcntl.h
deleted file mode 100644
index 14c0c4402b11..000000000000
--- a/arch/avr32/include/uapi/asm/fcntl.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_FCNTL_H
2#define __ASM_AVR32_FCNTL_H
3
4#include <asm-generic/fcntl.h>
5
6#endif /* __ASM_AVR32_FCNTL_H */
diff --git a/arch/avr32/include/uapi/asm/ioctl.h b/arch/avr32/include/uapi/asm/ioctl.h
deleted file mode 100644
index c8472c1398ef..000000000000
--- a/arch/avr32/include/uapi/asm/ioctl.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_IOCTL_H
2#define __ASM_AVR32_IOCTL_H
3
4#include <asm-generic/ioctl.h>
5
6#endif /* __ASM_AVR32_IOCTL_H */
diff --git a/arch/avr32/include/uapi/asm/ioctls.h b/arch/avr32/include/uapi/asm/ioctls.h
deleted file mode 100644
index 909cf66feaf5..000000000000
--- a/arch/avr32/include/uapi/asm/ioctls.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_IOCTLS_H
2#define __ASM_AVR32_IOCTLS_H
3
4#include <asm-generic/ioctls.h>
5
6#endif /* __ASM_AVR32_IOCTLS_H */
diff --git a/arch/avr32/include/uapi/asm/ipcbuf.h b/arch/avr32/include/uapi/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51cb6d0..000000000000
--- a/arch/avr32/include/uapi/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ipcbuf.h>
diff --git a/arch/avr32/include/uapi/asm/kvm_para.h b/arch/avr32/include/uapi/asm/kvm_para.h
deleted file mode 100644
index 14fab8f0b957..000000000000
--- a/arch/avr32/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/kvm_para.h>
diff --git a/arch/avr32/include/uapi/asm/mman.h b/arch/avr32/include/uapi/asm/mman.h
deleted file mode 100644
index 8eebf89f5ab1..000000000000
--- a/arch/avr32/include/uapi/asm/mman.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/mman.h>
diff --git a/arch/avr32/include/uapi/asm/msgbuf.h b/arch/avr32/include/uapi/asm/msgbuf.h
index ac18bc4da7f7..9eae6effad14 100644
--- a/arch/avr32/include/uapi/asm/msgbuf.h
+++ b/arch/avr32/include/uapi/asm/msgbuf.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_AVR32_MSGBUF_H 1#ifndef _UAPI__ASM_AVR32_MSGBUF_H
2#define __ASM_AVR32_MSGBUF_H 2#define _UAPI__ASM_AVR32_MSGBUF_H
3 3
4/* 4/*
5 * The msqid64_ds structure for i386 architecture. 5 * The msqid64_ds structure for i386 architecture.
@@ -28,4 +28,4 @@ struct msqid64_ds {
28 unsigned long __unused5; 28 unsigned long __unused5;
29}; 29};
30 30
31#endif /* __ASM_AVR32_MSGBUF_H */ 31#endif /* _UAPI__ASM_AVR32_MSGBUF_H */
diff --git a/arch/avr32/include/uapi/asm/poll.h b/arch/avr32/include/uapi/asm/poll.h
deleted file mode 100644
index c98509d3149e..000000000000
--- a/arch/avr32/include/uapi/asm/poll.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/poll.h>
diff --git a/arch/avr32/include/uapi/asm/posix_types.h b/arch/avr32/include/uapi/asm/posix_types.h
index 9ba9e749b3f3..5b813a8abf09 100644
--- a/arch/avr32/include/uapi/asm/posix_types.h
+++ b/arch/avr32/include/uapi/asm/posix_types.h
@@ -5,8 +5,8 @@
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8#ifndef __ASM_AVR32_POSIX_TYPES_H 8#ifndef _UAPI__ASM_AVR32_POSIX_TYPES_H
9#define __ASM_AVR32_POSIX_TYPES_H 9#define _UAPI__ASM_AVR32_POSIX_TYPES_H
10 10
11/* 11/*
12 * This file is generally used by user-level software, so you need to 12 * This file is generally used by user-level software, so you need to
@@ -34,4 +34,4 @@ typedef unsigned short __kernel_old_dev_t;
34 34
35#include <asm-generic/posix_types.h> 35#include <asm-generic/posix_types.h>
36 36
37#endif /* __ASM_AVR32_POSIX_TYPES_H */ 37#endif /* _UAPI__ASM_AVR32_POSIX_TYPES_H */
diff --git a/arch/avr32/include/uapi/asm/resource.h b/arch/avr32/include/uapi/asm/resource.h
deleted file mode 100644
index c6dd101472b1..000000000000
--- a/arch/avr32/include/uapi/asm/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_RESOURCE_H
2#define __ASM_AVR32_RESOURCE_H
3
4#include <asm-generic/resource.h>
5
6#endif /* __ASM_AVR32_RESOURCE_H */
diff --git a/arch/avr32/include/uapi/asm/sembuf.h b/arch/avr32/include/uapi/asm/sembuf.h
index e472216e0c97..6c6f7cf1e75a 100644
--- a/arch/avr32/include/uapi/asm/sembuf.h
+++ b/arch/avr32/include/uapi/asm/sembuf.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_AVR32_SEMBUF_H 1#ifndef _UAPI__ASM_AVR32_SEMBUF_H
2#define __ASM_AVR32_SEMBUF_H 2#define _UAPI__ASM_AVR32_SEMBUF_H
3 3
4/* 4/*
5* The semid64_ds structure for AVR32 architecture. 5* The semid64_ds structure for AVR32 architecture.
@@ -22,4 +22,4 @@ struct semid64_ds {
22 unsigned long __unused4; 22 unsigned long __unused4;
23}; 23};
24 24
25#endif /* __ASM_AVR32_SEMBUF_H */ 25#endif /* _UAPI__ASM_AVR32_SEMBUF_H */
diff --git a/arch/avr32/include/uapi/asm/setup.h b/arch/avr32/include/uapi/asm/setup.h
index e58aa9356faf..a654df7dba46 100644
--- a/arch/avr32/include/uapi/asm/setup.h
+++ b/arch/avr32/include/uapi/asm/setup.h
@@ -13,5 +13,4 @@
13 13
14#define COMMAND_LINE_SIZE 256 14#define COMMAND_LINE_SIZE 256
15 15
16
17#endif /* _UAPI__ASM_AVR32_SETUP_H__ */ 16#endif /* _UAPI__ASM_AVR32_SETUP_H__ */
diff --git a/arch/avr32/include/uapi/asm/shmbuf.h b/arch/avr32/include/uapi/asm/shmbuf.h
index c62fba41739a..b94cf8b60b73 100644
--- a/arch/avr32/include/uapi/asm/shmbuf.h
+++ b/arch/avr32/include/uapi/asm/shmbuf.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_AVR32_SHMBUF_H 1#ifndef _UAPI__ASM_AVR32_SHMBUF_H
2#define __ASM_AVR32_SHMBUF_H 2#define _UAPI__ASM_AVR32_SHMBUF_H
3 3
4/* 4/*
5 * The shmid64_ds structure for i386 architecture. 5 * The shmid64_ds structure for i386 architecture.
@@ -39,4 +39,4 @@ struct shminfo64 {
39 unsigned long __unused4; 39 unsigned long __unused4;
40}; 40};
41 41
42#endif /* __ASM_AVR32_SHMBUF_H */ 42#endif /* _UAPI__ASM_AVR32_SHMBUF_H */
diff --git a/arch/avr32/include/uapi/asm/sigcontext.h b/arch/avr32/include/uapi/asm/sigcontext.h
index e04062b5f39f..27e56bf6377f 100644
--- a/arch/avr32/include/uapi/asm/sigcontext.h
+++ b/arch/avr32/include/uapi/asm/sigcontext.h
@@ -5,8 +5,8 @@
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8#ifndef __ASM_AVR32_SIGCONTEXT_H 8#ifndef _UAPI__ASM_AVR32_SIGCONTEXT_H
9#define __ASM_AVR32_SIGCONTEXT_H 9#define _UAPI__ASM_AVR32_SIGCONTEXT_H
10 10
11struct sigcontext { 11struct sigcontext {
12 unsigned long oldmask; 12 unsigned long oldmask;
@@ -31,4 +31,4 @@ struct sigcontext {
31 unsigned long r0; 31 unsigned long r0;
32}; 32};
33 33
34#endif /* __ASM_AVR32_SIGCONTEXT_H */ 34#endif /* _UAPI__ASM_AVR32_SIGCONTEXT_H */
diff --git a/arch/avr32/include/uapi/asm/siginfo.h b/arch/avr32/include/uapi/asm/siginfo.h
deleted file mode 100644
index 5ee93f40a8a8..000000000000
--- a/arch/avr32/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _AVR32_SIGINFO_H
2#define _AVR32_SIGINFO_H
3
4#include <asm-generic/siginfo.h>
5
6#endif
diff --git a/arch/avr32/include/uapi/asm/signal.h b/arch/avr32/include/uapi/asm/signal.h
index 1b77a93eff50..ffe8c770cafd 100644
--- a/arch/avr32/include/uapi/asm/signal.h
+++ b/arch/avr32/include/uapi/asm/signal.h
@@ -118,5 +118,4 @@ typedef struct sigaltstack {
118 size_t ss_size; 118 size_t ss_size;
119} stack_t; 119} stack_t;
120 120
121
122#endif /* _UAPI__ASM_AVR32_SIGNAL_H */ 121#endif /* _UAPI__ASM_AVR32_SIGNAL_H */
diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h
index 439936421434..cbf902e4cd9e 100644
--- a/arch/avr32/include/uapi/asm/socket.h
+++ b/arch/avr32/include/uapi/asm/socket.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_AVR32_SOCKET_H 1#ifndef _UAPI__ASM_AVR32_SOCKET_H
2#define __ASM_AVR32_SOCKET_H 2#define _UAPI__ASM_AVR32_SOCKET_H
3 3
4#include <asm/sockios.h> 4#include <asm/sockios.h>
5 5
@@ -78,4 +78,4 @@
78 78
79#define SO_MAX_PACING_RATE 47 79#define SO_MAX_PACING_RATE 47
80 80
81#endif /* __ASM_AVR32_SOCKET_H */ 81#endif /* _UAPI__ASM_AVR32_SOCKET_H */
diff --git a/arch/avr32/include/uapi/asm/sockios.h b/arch/avr32/include/uapi/asm/sockios.h
index 0802d742f97d..d04785453532 100644
--- a/arch/avr32/include/uapi/asm/sockios.h
+++ b/arch/avr32/include/uapi/asm/sockios.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_AVR32_SOCKIOS_H 1#ifndef _UAPI__ASM_AVR32_SOCKIOS_H
2#define __ASM_AVR32_SOCKIOS_H 2#define _UAPI__ASM_AVR32_SOCKIOS_H
3 3
4/* Socket-level I/O control calls. */ 4/* Socket-level I/O control calls. */
5#define FIOSETOWN 0x8901 5#define FIOSETOWN 0x8901
@@ -10,4 +10,4 @@
10#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ 10#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
11#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ 11#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
12 12
13#endif /* __ASM_AVR32_SOCKIOS_H */ 13#endif /* _UAPI__ASM_AVR32_SOCKIOS_H */
diff --git a/arch/avr32/include/uapi/asm/stat.h b/arch/avr32/include/uapi/asm/stat.h
index e72881e10230..c06acef7fce7 100644
--- a/arch/avr32/include/uapi/asm/stat.h
+++ b/arch/avr32/include/uapi/asm/stat.h
@@ -5,8 +5,8 @@
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8#ifndef __ASM_AVR32_STAT_H 8#ifndef _UAPI__ASM_AVR32_STAT_H
9#define __ASM_AVR32_STAT_H 9#define _UAPI__ASM_AVR32_STAT_H
10 10
11struct __old_kernel_stat { 11struct __old_kernel_stat {
12 unsigned short st_dev; 12 unsigned short st_dev;
@@ -76,4 +76,4 @@ struct stat64 {
76 unsigned long __unused2; 76 unsigned long __unused2;
77}; 77};
78 78
79#endif /* __ASM_AVR32_STAT_H */ 79#endif /* _UAPI__ASM_AVR32_STAT_H */
diff --git a/arch/avr32/include/uapi/asm/statfs.h b/arch/avr32/include/uapi/asm/statfs.h
deleted file mode 100644
index 2961bd18c50e..000000000000
--- a/arch/avr32/include/uapi/asm/statfs.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_STATFS_H
2#define __ASM_AVR32_STATFS_H
3
4#include <asm-generic/statfs.h>
5
6#endif /* __ASM_AVR32_STATFS_H */
diff --git a/arch/avr32/include/uapi/asm/swab.h b/arch/avr32/include/uapi/asm/swab.h
index 14cc737bbca6..1a03549e7dc5 100644
--- a/arch/avr32/include/uapi/asm/swab.h
+++ b/arch/avr32/include/uapi/asm/swab.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * AVR32 byteswapping functions. 2 * AVR32 byteswapping functions.
3 */ 3 */
4#ifndef __ASM_AVR32_SWAB_H 4#ifndef _UAPI__ASM_AVR32_SWAB_H
5#define __ASM_AVR32_SWAB_H 5#define _UAPI__ASM_AVR32_SWAB_H
6 6
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/compiler.h> 8#include <linux/compiler.h>
@@ -32,4 +32,4 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
32#define __arch_swab32 __arch_swab32 32#define __arch_swab32 __arch_swab32
33#endif 33#endif
34 34
35#endif /* __ASM_AVR32_SWAB_H */ 35#endif /* _UAPI__ASM_AVR32_SWAB_H */
diff --git a/arch/avr32/include/uapi/asm/termbits.h b/arch/avr32/include/uapi/asm/termbits.h
index 366adc5ebb10..32789ccb38f8 100644
--- a/arch/avr32/include/uapi/asm/termbits.h
+++ b/arch/avr32/include/uapi/asm/termbits.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_AVR32_TERMBITS_H 1#ifndef _UAPI__ASM_AVR32_TERMBITS_H
2#define __ASM_AVR32_TERMBITS_H 2#define _UAPI__ASM_AVR32_TERMBITS_H
3 3
4#include <linux/posix_types.h> 4#include <linux/posix_types.h>
5 5
@@ -193,4 +193,4 @@ struct ktermios {
193#define TCSADRAIN 1 193#define TCSADRAIN 1
194#define TCSAFLUSH 2 194#define TCSAFLUSH 2
195 195
196#endif /* __ASM_AVR32_TERMBITS_H */ 196#endif /* _UAPI__ASM_AVR32_TERMBITS_H */
diff --git a/arch/avr32/include/uapi/asm/termios.h b/arch/avr32/include/uapi/asm/termios.h
index b8ef8ea63352..c8a0081556c4 100644
--- a/arch/avr32/include/uapi/asm/termios.h
+++ b/arch/avr32/include/uapi/asm/termios.h
@@ -46,5 +46,4 @@ struct termio {
46 46
47/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ 47/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
48 48
49
50#endif /* _UAPI__ASM_AVR32_TERMIOS_H */ 49#endif /* _UAPI__ASM_AVR32_TERMIOS_H */
diff --git a/arch/avr32/include/uapi/asm/types.h b/arch/avr32/include/uapi/asm/types.h
index bb34ad349dfc..7c986c4e99b5 100644
--- a/arch/avr32/include/uapi/asm/types.h
+++ b/arch/avr32/include/uapi/asm/types.h
@@ -5,4 +5,9 @@
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8#ifndef _UAPI__ASM_AVR32_TYPES_H
9#define _UAPI__ASM_AVR32_TYPES_H
10
8#include <asm-generic/int-ll64.h> 11#include <asm-generic/int-ll64.h>
12
13#endif /* _UAPI__ASM_AVR32_TYPES_H */
diff --git a/arch/avr32/include/uapi/asm/unistd.h b/arch/avr32/include/uapi/asm/unistd.h
index 3eaa68753adb..8822bf46ddc6 100644
--- a/arch/avr32/include/uapi/asm/unistd.h
+++ b/arch/avr32/include/uapi/asm/unistd.h
@@ -301,5 +301,4 @@
301#define __NR_eventfd 281 301#define __NR_eventfd 281
302#define __NR_setns 283 302#define __NR_setns 283
303 303
304
305#endif /* _UAPI__ASM_AVR32_UNISTD_H */ 304#endif /* _UAPI__ASM_AVR32_UNISTD_H */
diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
index 9899d3cc6f03..7301f4806bbe 100644
--- a/arch/avr32/kernel/entry-avr32b.S
+++ b/arch/avr32/kernel/entry-avr32b.S
@@ -401,9 +401,10 @@ handle_critical:
401 /* We should never get here... */ 401 /* We should never get here... */
402bad_return: 402bad_return:
403 sub r12, pc, (. - 1f) 403 sub r12, pc, (. - 1f)
404 bral panic 404 lddpc pc, 2f
405 .align 2 405 .align 2
4061: .asciz "Return from critical exception!" 4061: .asciz "Return from critical exception!"
4072: .long panic
407 408
408 .align 1 409 .align 1
409do_bus_error_write: 410do_bus_error_write:
diff --git a/arch/avr32/kernel/head.S b/arch/avr32/kernel/head.S
index 6163bd0acb95..59eae6dfbed2 100644
--- a/arch/avr32/kernel/head.S
+++ b/arch/avr32/kernel/head.S
@@ -10,33 +10,13 @@
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11 11
12#include <asm/page.h> 12#include <asm/page.h>
13#include <asm/thread_info.h>
14#include <asm/sysreg.h>
15 13
16 .section .init.text,"ax" 14 .section .init.text,"ax"
17 .global kernel_entry 15 .global kernel_entry
18kernel_entry: 16kernel_entry:
19 /* Initialize status register */
20 lddpc r0, init_sr
21 mtsr SYSREG_SR, r0
22
23 /* Set initial stack pointer */
24 lddpc sp, stack_addr
25 sub sp, -THREAD_SIZE
26
27#ifdef CONFIG_FRAME_POINTER
28 /* Mark last stack frame */
29 mov lr, 0
30 mov r7, 0
31#endif
32
33 /* Start the show */ 17 /* Start the show */
34 lddpc pc, kernel_start_addr 18 lddpc pc, kernel_start_addr
35 19
36 .align 2 20 .align 2
37init_sr:
38 .long 0x007f0000 /* Supervisor mode, everything masked */
39stack_addr:
40 .long init_thread_union
41kernel_start_addr: 21kernel_start_addr:
42 .long start_kernel 22 .long start_kernel
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 12f828ad5058..d0f771be9e96 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -59,7 +59,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
59static struct irqaction timer_irqaction = { 59static struct irqaction timer_irqaction = {
60 .handler = timer_interrupt, 60 .handler = timer_interrupt,
61 /* Oprofile uses the same irq as the timer, so allow it to be shared */ 61 /* Oprofile uses the same irq as the timer, so allow it to be shared */
62 .flags = IRQF_TIMER | IRQF_DISABLED | IRQF_SHARED, 62 .flags = IRQF_TIMER | IRQF_SHARED,
63 .name = "avr32_comparator", 63 .name = "avr32_comparator",
64}; 64};
65 65
diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
index 32d680eb6f48..db190842b80c 100644
--- a/arch/avr32/mach-at32ap/pm.c
+++ b/arch/avr32/mach-at32ap/pm.c
@@ -181,7 +181,7 @@ static const struct platform_suspend_ops avr32_pm_ops = {
181 .enter = avr32_pm_enter, 181 .enter = avr32_pm_enter,
182}; 182};
183 183
184static unsigned long avr32_pm_offset(void *symbol) 184static unsigned long __init avr32_pm_offset(void *symbol)
185{ 185{
186 extern u8 pm_exception[]; 186 extern u8 pm_exception[];
187 187
diff --git a/arch/blackfin/include/asm/hardirq.h b/arch/blackfin/include/asm/hardirq.h
index c078dd78d998..58b54a6d5a16 100644
--- a/arch/blackfin/include/asm/hardirq.h
+++ b/arch/blackfin/include/asm/hardirq.h
@@ -12,9 +12,6 @@
12extern void ack_bad_irq(unsigned int irq); 12extern void ack_bad_irq(unsigned int irq);
13#define ack_bad_irq ack_bad_irq 13#define ack_bad_irq ack_bad_irq
14 14
15/* Define until common code gets sane defaults */
16#define HARDIRQ_BITS 9
17
18#include <asm-generic/hardirq.h> 15#include <asm-generic/hardirq.h>
19 16
20#endif 17#endif
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h
index 3894005337ba..55f473bdad36 100644
--- a/arch/blackfin/include/asm/thread_info.h
+++ b/arch/blackfin/include/asm/thread_info.h
@@ -88,8 +88,6 @@ static inline struct thread_info *current_thread_info(void)
88#define TI_CPU 12 88#define TI_CPU 12
89#define TI_PREEMPT 16 89#define TI_PREEMPT 16
90 90
91#define PREEMPT_ACTIVE 0x4000000
92
93/* 91/*
94 * thread information flag bit numbers 92 * thread information flag bit numbers
95 */ 93 */
diff --git a/arch/c6x/include/asm/thread_info.h b/arch/c6x/include/asm/thread_info.h
index 4c8dc562bd90..d4e9ef87076d 100644
--- a/arch/c6x/include/asm/thread_info.h
+++ b/arch/c6x/include/asm/thread_info.h
@@ -84,8 +84,6 @@ struct thread_info *current_thread_info(void)
84#define put_thread_info(ti) put_task_struct((ti)->task) 84#define put_thread_info(ti) put_task_struct((ti)->task)
85#endif /* __ASSEMBLY__ */ 85#endif /* __ASSEMBLY__ */
86 86
87#define PREEMPT_ACTIVE 0x10000000
88
89/* 87/*
90 * thread information flag bit numbers 88 * thread information flag bit numbers
91 * - pending work-to-be-done flags are in LSW 89 * - pending work-to-be-done flags are in LSW
diff --git a/arch/cris/include/asm/hardirq.h b/arch/cris/include/asm/hardirq.h
index 17bb12d760b2..04126f7bfab2 100644
--- a/arch/cris/include/asm/hardirq.h
+++ b/arch/cris/include/asm/hardirq.h
@@ -2,18 +2,6 @@
2#define __ASM_HARDIRQ_H 2#define __ASM_HARDIRQ_H
3 3
4#include <asm/irq.h> 4#include <asm/irq.h>
5
6#define HARDIRQ_BITS 8
7
8/*
9 * The hardirq mask has to be large enough to have
10 * space for potentially all IRQ sources in the system
11 * nesting on a single CPU:
12 */
13#if (1 << HARDIRQ_BITS) < NR_IRQS
14# error HARDIRQ_BITS is too low!
15#endif
16
17#include <asm-generic/hardirq.h> 5#include <asm-generic/hardirq.h>
18 6
19#endif /* __ASM_HARDIRQ_H */ 7#endif /* __ASM_HARDIRQ_H */
diff --git a/arch/cris/include/asm/thread_info.h b/arch/cris/include/asm/thread_info.h
index 07c8c40c52b3..55dede18c032 100644
--- a/arch/cris/include/asm/thread_info.h
+++ b/arch/cris/include/asm/thread_info.h
@@ -44,8 +44,6 @@ struct thread_info {
44 44
45#endif 45#endif
46 46
47#define PREEMPT_ACTIVE 0x10000000
48
49/* 47/*
50 * macros/functions for gaining access to the thread information structure 48 * macros/functions for gaining access to the thread information structure
51 */ 49 */
diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h
index bebd7eadc772..af29e17c0181 100644
--- a/arch/frv/include/asm/thread_info.h
+++ b/arch/frv/include/asm/thread_info.h
@@ -52,8 +52,6 @@ struct thread_info {
52 52
53#endif 53#endif
54 54
55#define PREEMPT_ACTIVE 0x10000000
56
57/* 55/*
58 * macros/functions for gaining access to the thread information structure 56 * macros/functions for gaining access to the thread information structure
59 */ 57 */
diff --git a/arch/hexagon/include/asm/thread_info.h b/arch/hexagon/include/asm/thread_info.h
index f7c32406a711..a59dad3b3695 100644
--- a/arch/hexagon/include/asm/thread_info.h
+++ b/arch/hexagon/include/asm/thread_info.h
@@ -73,10 +73,6 @@ struct thread_info {
73 73
74#endif /* __ASSEMBLY__ */ 74#endif /* __ASSEMBLY__ */
75 75
76/* looks like "linux/hardirq.h" uses this. */
77
78#define PREEMPT_ACTIVE 0x10000000
79
80#ifndef __ASSEMBLY__ 76#ifndef __ASSEMBLY__
81 77
82#define INIT_THREAD_INFO(tsk) \ 78#define INIT_THREAD_INFO(tsk) \
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index d43daf192b21..4c530a82fc46 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1992,7 +1992,7 @@ sba_connect_bus(struct pci_bus *bus)
1992 if (PCI_CONTROLLER(bus)->iommu) 1992 if (PCI_CONTROLLER(bus)->iommu)
1993 return; 1993 return;
1994 1994
1995 handle = PCI_CONTROLLER(bus)->acpi_handle; 1995 handle = acpi_device_handle(PCI_CONTROLLER(bus)->companion);
1996 if (!handle) 1996 if (!handle)
1997 return; 1997 return;
1998 1998
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index 80775f55f03f..71fbaaa495cc 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -95,7 +95,7 @@ struct iospace_resource {
95}; 95};
96 96
97struct pci_controller { 97struct pci_controller {
98 void *acpi_handle; 98 struct acpi_device *companion;
99 void *iommu; 99 void *iommu;
100 int segment; 100 int segment;
101 int node; /* nearest node with memory or -1 for global allocation */ 101 int node; /* nearest node with memory or -1 for global allocation */
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index cade13dd0299..5957cf61f898 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -11,9 +11,6 @@
11#include <asm/processor.h> 11#include <asm/processor.h>
12#include <asm/ptrace.h> 12#include <asm/ptrace.h>
13 13
14#define PREEMPT_ACTIVE_BIT 30
15#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT)
16
17#ifndef __ASSEMBLY__ 14#ifndef __ASSEMBLY__
18 15
19/* 16/*
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 7a53530f22c2..ddea607f948a 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1169,21 +1169,8 @@ skip_rbs_switch:
1169.work_pending: 1169.work_pending:
1170 tbit.z p6,p0=r31,TIF_NEED_RESCHED // is resched not needed? 1170 tbit.z p6,p0=r31,TIF_NEED_RESCHED // is resched not needed?
1171(p6) br.cond.sptk.few .notify 1171(p6) br.cond.sptk.few .notify
1172#ifdef CONFIG_PREEMPT 1172 br.call.spnt.many rp=preempt_schedule_irq
1173(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
1174 ;;
1175(pKStk) st4 [r20]=r21
1176#endif
1177 SSM_PSR_I(p0, p6, r2) // enable interrupts
1178 br.call.spnt.many rp=schedule
1179.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 (re-check) 1173.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 (re-check)
1180 RSM_PSR_I(p0, r2, r20) // disable interrupts
1181 ;;
1182#ifdef CONFIG_PREEMPT
1183(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
1184 ;;
1185(pKStk) st4 [r20]=r0 // preempt_count() <- 0
1186#endif
1187(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end 1174(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end
1188 br.cond.sptk.many .work_processed_kernel 1175 br.cond.sptk.many .work_processed_kernel
1189 1176
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 5a9ff1c3c3e9..cb592773c78b 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2166,12 +2166,6 @@ static const struct file_operations pfm_file_ops = {
2166 .flush = pfm_flush 2166 .flush = pfm_flush
2167}; 2167};
2168 2168
2169static int
2170pfmfs_delete_dentry(const struct dentry *dentry)
2171{
2172 return 1;
2173}
2174
2175static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen) 2169static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
2176{ 2170{
2177 return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]", 2171 return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]",
@@ -2179,7 +2173,7 @@ static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
2179} 2173}
2180 2174
2181static const struct dentry_operations pfmfs_dentry_operations = { 2175static const struct dentry_operations pfmfs_dentry_operations = {
2182 .d_delete = pfmfs_delete_dentry, 2176 .d_delete = always_delete_dentry,
2183 .d_dname = pfmfs_dname, 2177 .d_dname = pfmfs_dname,
2184}; 2178};
2185 2179
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 2326790b7d8b..9e4938d8ca4d 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -436,9 +436,9 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
436 if (!controller) 436 if (!controller)
437 return NULL; 437 return NULL;
438 438
439 controller->acpi_handle = device->handle; 439 controller->companion = device;
440 440
441 pxm = acpi_get_pxm(controller->acpi_handle); 441 pxm = acpi_get_pxm(device->handle);
442#ifdef CONFIG_NUMA 442#ifdef CONFIG_NUMA
443 if (pxm >= 0) 443 if (pxm >= 0)
444 controller->node = pxm_to_node(pxm); 444 controller->node = pxm_to_node(pxm);
@@ -489,7 +489,7 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
489{ 489{
490 struct pci_controller *controller = bridge->bus->sysdata; 490 struct pci_controller *controller = bridge->bus->sysdata;
491 491
492 ACPI_HANDLE_SET(&bridge->dev, controller->acpi_handle); 492 ACPI_COMPANION_SET(&bridge->dev, controller->companion);
493 return 0; 493 return 0;
494} 494}
495 495
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c
index b1725398b5af..0640739cc20c 100644
--- a/arch/ia64/sn/kernel/io_acpi_init.c
+++ b/arch/ia64/sn/kernel/io_acpi_init.c
@@ -132,7 +132,7 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
132 struct acpi_resource_vendor_typed *vendor; 132 struct acpi_resource_vendor_typed *vendor;
133 133
134 134
135 handle = PCI_CONTROLLER(bus)->acpi_handle; 135 handle = acpi_device_handle(PCI_CONTROLLER(bus)->companion);
136 status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS, 136 status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
137 &sn_uuid, &buffer); 137 &sn_uuid, &buffer);
138 if (ACPI_FAILURE(status)) { 138 if (ACPI_FAILURE(status)) {
@@ -360,7 +360,7 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info,
360 acpi_status status; 360 acpi_status status;
361 struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 361 struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
362 362
363 rootbus_handle = PCI_CONTROLLER(dev)->acpi_handle; 363 rootbus_handle = acpi_device_handle(PCI_CONTROLLER(dev)->companion);
364 status = acpi_evaluate_integer(rootbus_handle, METHOD_NAME__SEG, NULL, 364 status = acpi_evaluate_integer(rootbus_handle, METHOD_NAME__SEG, NULL,
365 &segment); 365 &segment);
366 if (ACPI_SUCCESS(status)) { 366 if (ACPI_SUCCESS(status)) {
diff --git a/arch/m32r/include/asm/hardirq.h b/arch/m32r/include/asm/hardirq.h
index 4c31c0ae215e..5f2ac4f64ddf 100644
--- a/arch/m32r/include/asm/hardirq.h
+++ b/arch/m32r/include/asm/hardirq.h
@@ -3,22 +3,6 @@
3#define __ASM_HARDIRQ_H 3#define __ASM_HARDIRQ_H
4 4
5#include <asm/irq.h> 5#include <asm/irq.h>
6
7#if NR_IRQS > 256
8#define HARDIRQ_BITS 9
9#else
10#define HARDIRQ_BITS 8
11#endif
12
13/*
14 * The hardirq mask has to be large enough to have
15 * space for potentially all IRQ sources in the system
16 * nesting on a single CPU:
17 */
18#if (1 << HARDIRQ_BITS) < NR_IRQS
19# error HARDIRQ_BITS is too low!
20#endif
21
22#include <asm-generic/hardirq.h> 6#include <asm-generic/hardirq.h>
23 7
24#endif /* __ASM_HARDIRQ_H */ 8#endif /* __ASM_HARDIRQ_H */
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index c074f4c2e858..00171703402f 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -53,8 +53,6 @@ struct thread_info {
53 53
54#endif 54#endif
55 55
56#define PREEMPT_ACTIVE 0x10000000
57
58#define THREAD_SIZE (PAGE_SIZE << 1) 56#define THREAD_SIZE (PAGE_SIZE << 1)
59#define THREAD_SIZE_ORDER 1 57#define THREAD_SIZE_ORDER 1
60/* 58/*
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 0c01543f10cd..7c3db9940ce1 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -182,13 +182,7 @@ need_resched:
182 ld r4, PSW(sp) ; interrupts off (exception path) ? 182 ld r4, PSW(sp) ; interrupts off (exception path) ?
183 and3 r4, r4, #0x4000 183 and3 r4, r4, #0x4000
184 beqz r4, restore_all 184 beqz r4, restore_all
185 LDIMM (r4, PREEMPT_ACTIVE) 185 bl preempt_schedule_irq
186 st r4, @(TI_PRE_COUNT, r8)
187 ENABLE_INTERRUPTS(r4)
188 bl schedule
189 ldi r4, #0
190 st r4, @(TI_PRE_COUNT, r8)
191 DISABLE_INTERRUPTS(r4)
192 bra need_resched 186 bra need_resched
193#endif 187#endif
194 188
diff --git a/arch/m68k/include/asm/hardirq.h b/arch/m68k/include/asm/hardirq.h
index db30ed276878..6c618529d9b9 100644
--- a/arch/m68k/include/asm/hardirq.h
+++ b/arch/m68k/include/asm/hardirq.h
@@ -5,17 +5,6 @@
5#include <linux/cache.h> 5#include <linux/cache.h>
6#include <asm/irq.h> 6#include <asm/irq.h>
7 7
8#define HARDIRQ_BITS 8
9
10/*
11 * The hardirq mask has to be large enough to have
12 * space for potentially all IRQ sources in the system
13 * nesting on a single CPU:
14 */
15#if (1 << HARDIRQ_BITS) < NR_IRQS
16# error HARDIRQ_BITS is too low!
17#endif
18
19#ifdef CONFIG_MMU 8#ifdef CONFIG_MMU
20 9
21static inline void ack_bad_irq(unsigned int irq) 10static inline void ack_bad_irq(unsigned int irq)
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index 126131f94a2c..21a4784ca5a1 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -35,8 +35,6 @@ struct thread_info {
35}; 35};
36#endif /* __ASSEMBLY__ */ 36#endif /* __ASSEMBLY__ */
37 37
38#define PREEMPT_ACTIVE 0x4000000
39
40#define INIT_THREAD_INFO(tsk) \ 38#define INIT_THREAD_INFO(tsk) \
41{ \ 39{ \
42 .task = &tsk, \ 40 .task = &tsk, \
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index a78f5649e8de..b54ac7aba850 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -45,7 +45,7 @@
45.globl system_call, buserr, trap, resume 45.globl system_call, buserr, trap, resume
46.globl sys_call_table 46.globl sys_call_table
47.globl __sys_fork, __sys_clone, __sys_vfork 47.globl __sys_fork, __sys_clone, __sys_vfork
48.globl ret_from_interrupt, bad_interrupt 48.globl bad_interrupt
49.globl auto_irqhandler_fixup 49.globl auto_irqhandler_fixup
50.globl user_irqvec_fixup 50.globl user_irqvec_fixup
51 51
@@ -275,8 +275,6 @@ do_delayed_trace:
275ENTRY(auto_inthandler) 275ENTRY(auto_inthandler)
276 SAVE_ALL_INT 276 SAVE_ALL_INT
277 GET_CURRENT(%d0) 277 GET_CURRENT(%d0)
278 movel %d0,%a1
279 addqb #1,%a1@(TINFO_PREEMPT+1)
280 | put exception # in d0 278 | put exception # in d0
281 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0 279 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
282 subw #VEC_SPUR,%d0 280 subw #VEC_SPUR,%d0
@@ -286,32 +284,13 @@ ENTRY(auto_inthandler)
286auto_irqhandler_fixup = . + 2 284auto_irqhandler_fixup = . + 2
287 jsr do_IRQ | process the IRQ 285 jsr do_IRQ | process the IRQ
288 addql #8,%sp | pop parameters off stack 286 addql #8,%sp | pop parameters off stack
289 287 jra ret_from_exception
290ret_from_interrupt:
291 movel %curptr@(TASK_STACK),%a1
292 subqb #1,%a1@(TINFO_PREEMPT+1)
293 jeq ret_from_last_interrupt
2942: RESTORE_ALL
295
296 ALIGN
297ret_from_last_interrupt:
298 moveq #(~ALLOWINT>>8)&0xff,%d0
299 andb %sp@(PT_OFF_SR),%d0
300 jne 2b
301
302 /* check if we need to do software interrupts */
303 tstl irq_stat+CPUSTAT_SOFTIRQ_PENDING
304 jeq .Lret_from_exception
305 pea ret_from_exception
306 jra do_softirq
307 288
308/* Handler for user defined interrupt vectors */ 289/* Handler for user defined interrupt vectors */
309 290
310ENTRY(user_inthandler) 291ENTRY(user_inthandler)
311 SAVE_ALL_INT 292 SAVE_ALL_INT
312 GET_CURRENT(%d0) 293 GET_CURRENT(%d0)
313 movel %d0,%a1
314 addqb #1,%a1@(TINFO_PREEMPT+1)
315 | put exception # in d0 294 | put exception # in d0
316 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0 295 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
317user_irqvec_fixup = . + 2 296user_irqvec_fixup = . + 2
@@ -321,29 +300,18 @@ user_irqvec_fixup = . + 2
321 movel %d0,%sp@- | put vector # on stack 300 movel %d0,%sp@- | put vector # on stack
322 jsr do_IRQ | process the IRQ 301 jsr do_IRQ | process the IRQ
323 addql #8,%sp | pop parameters off stack 302 addql #8,%sp | pop parameters off stack
324 303 jra ret_from_exception
325 movel %curptr@(TASK_STACK),%a1
326 subqb #1,%a1@(TINFO_PREEMPT+1)
327 jeq ret_from_last_interrupt
328 RESTORE_ALL
329 304
330/* Handler for uninitialized and spurious interrupts */ 305/* Handler for uninitialized and spurious interrupts */
331 306
332ENTRY(bad_inthandler) 307ENTRY(bad_inthandler)
333 SAVE_ALL_INT 308 SAVE_ALL_INT
334 GET_CURRENT(%d0) 309 GET_CURRENT(%d0)
335 movel %d0,%a1
336 addqb #1,%a1@(TINFO_PREEMPT+1)
337 310
338 movel %sp,%sp@- 311 movel %sp,%sp@-
339 jsr handle_badint 312 jsr handle_badint
340 addql #4,%sp 313 addql #4,%sp
341 314 jra ret_from_exception
342 movel %curptr@(TASK_STACK),%a1
343 subqb #1,%a1@(TINFO_PREEMPT+1)
344 jeq ret_from_last_interrupt
345 RESTORE_ALL
346
347 315
348resume: 316resume:
349 /* 317 /*
diff --git a/arch/m68k/kernel/ints.c b/arch/m68k/kernel/ints.c
index 4d7da384eea0..077d3a70fed1 100644
--- a/arch/m68k/kernel/ints.c
+++ b/arch/m68k/kernel/ints.c
@@ -58,12 +58,6 @@ void __init init_IRQ(void)
58{ 58{
59 int i; 59 int i;
60 60
61 /* assembly irq entry code relies on this... */
62 if (HARDIRQ_MASK != 0x00ff0000) {
63 extern void hardirq_mask_is_broken(void);
64 hardirq_mask_is_broken();
65 }
66
67 for (i = IRQ_AUTO_1; i <= IRQ_AUTO_7; i++) 61 for (i = IRQ_AUTO_1; i <= IRQ_AUTO_7; i++)
68 irq_set_chip_and_handler(i, &auto_irq_chip, handle_simple_irq); 62 irq_set_chip_and_handler(i, &auto_irq_chip, handle_simple_irq);
69 63
diff --git a/arch/m68k/platform/68000/entry.S b/arch/m68k/platform/68000/entry.S
index 7f91c2fde509..23ac054c6e1a 100644
--- a/arch/m68k/platform/68000/entry.S
+++ b/arch/m68k/platform/68000/entry.S
@@ -27,7 +27,6 @@
27.globl ret_from_exception 27.globl ret_from_exception
28.globl ret_from_signal 28.globl ret_from_signal
29.globl sys_call_table 29.globl sys_call_table
30.globl ret_from_interrupt
31.globl bad_interrupt 30.globl bad_interrupt
32.globl inthandler1 31.globl inthandler1
33.globl inthandler2 32.globl inthandler2
@@ -137,7 +136,7 @@ inthandler1:
137 movel #65,%sp@- /* put vector # on stack*/ 136 movel #65,%sp@- /* put vector # on stack*/
138 jbsr process_int /* process the IRQ*/ 137 jbsr process_int /* process the IRQ*/
1393: addql #8,%sp /* pop parameters off stack*/ 1383: addql #8,%sp /* pop parameters off stack*/
140 bra ret_from_interrupt 139 bra ret_from_exception
141 140
142inthandler2: 141inthandler2:
143 SAVE_ALL_INT 142 SAVE_ALL_INT
@@ -148,7 +147,7 @@ inthandler2:
148 movel #66,%sp@- /* put vector # on stack*/ 147 movel #66,%sp@- /* put vector # on stack*/
149 jbsr process_int /* process the IRQ*/ 148 jbsr process_int /* process the IRQ*/
1503: addql #8,%sp /* pop parameters off stack*/ 1493: addql #8,%sp /* pop parameters off stack*/
151 bra ret_from_interrupt 150 bra ret_from_exception
152 151
153inthandler3: 152inthandler3:
154 SAVE_ALL_INT 153 SAVE_ALL_INT
@@ -159,7 +158,7 @@ inthandler3:
159 movel #67,%sp@- /* put vector # on stack*/ 158 movel #67,%sp@- /* put vector # on stack*/
160 jbsr process_int /* process the IRQ*/ 159 jbsr process_int /* process the IRQ*/
1613: addql #8,%sp /* pop parameters off stack*/ 1603: addql #8,%sp /* pop parameters off stack*/
162 bra ret_from_interrupt 161 bra ret_from_exception
163 162
164inthandler4: 163inthandler4:
165 SAVE_ALL_INT 164 SAVE_ALL_INT
@@ -170,7 +169,7 @@ inthandler4:
170 movel #68,%sp@- /* put vector # on stack*/ 169 movel #68,%sp@- /* put vector # on stack*/
171 jbsr process_int /* process the IRQ*/ 170 jbsr process_int /* process the IRQ*/
1723: addql #8,%sp /* pop parameters off stack*/ 1713: addql #8,%sp /* pop parameters off stack*/
173 bra ret_from_interrupt 172 bra ret_from_exception
174 173
175inthandler5: 174inthandler5:
176 SAVE_ALL_INT 175 SAVE_ALL_INT
@@ -181,7 +180,7 @@ inthandler5:
181 movel #69,%sp@- /* put vector # on stack*/ 180 movel #69,%sp@- /* put vector # on stack*/
182 jbsr process_int /* process the IRQ*/ 181 jbsr process_int /* process the IRQ*/
1833: addql #8,%sp /* pop parameters off stack*/ 1823: addql #8,%sp /* pop parameters off stack*/
184 bra ret_from_interrupt 183 bra ret_from_exception
185 184
186inthandler6: 185inthandler6:
187 SAVE_ALL_INT 186 SAVE_ALL_INT
@@ -192,7 +191,7 @@ inthandler6:
192 movel #70,%sp@- /* put vector # on stack*/ 191 movel #70,%sp@- /* put vector # on stack*/
193 jbsr process_int /* process the IRQ*/ 192 jbsr process_int /* process the IRQ*/
1943: addql #8,%sp /* pop parameters off stack*/ 1933: addql #8,%sp /* pop parameters off stack*/
195 bra ret_from_interrupt 194 bra ret_from_exception
196 195
197inthandler7: 196inthandler7:
198 SAVE_ALL_INT 197 SAVE_ALL_INT
@@ -203,7 +202,7 @@ inthandler7:
203 movel #71,%sp@- /* put vector # on stack*/ 202 movel #71,%sp@- /* put vector # on stack*/
204 jbsr process_int /* process the IRQ*/ 203 jbsr process_int /* process the IRQ*/
2053: addql #8,%sp /* pop parameters off stack*/ 2043: addql #8,%sp /* pop parameters off stack*/
206 bra ret_from_interrupt 205 bra ret_from_exception
207 206
208inthandler: 207inthandler:
209 SAVE_ALL_INT 208 SAVE_ALL_INT
@@ -214,23 +213,7 @@ inthandler:
214 movel %d0,%sp@- /* put vector # on stack*/ 213 movel %d0,%sp@- /* put vector # on stack*/
215 jbsr process_int /* process the IRQ*/ 214 jbsr process_int /* process the IRQ*/
2163: addql #8,%sp /* pop parameters off stack*/ 2153: addql #8,%sp /* pop parameters off stack*/
217 bra ret_from_interrupt 216 bra ret_from_exception
218
219ret_from_interrupt:
220 jeq 1f
2212:
222 RESTORE_ALL
2231:
224 moveb %sp@(PT_OFF_SR), %d0
225 and #7, %d0
226 jhi 2b
227
228 /* check if we need to do software interrupts */
229 jeq ret_from_exception
230
231 pea ret_from_exception
232 jra do_softirq
233
234 217
235/* 218/*
236 * Handler for uninitialized and spurious interrupts. 219 * Handler for uninitialized and spurious interrupts.
diff --git a/arch/m68k/platform/68360/entry.S b/arch/m68k/platform/68360/entry.S
index 904fd9a4af4e..447c33ef37fd 100644
--- a/arch/m68k/platform/68360/entry.S
+++ b/arch/m68k/platform/68360/entry.S
@@ -29,7 +29,6 @@
29.globl ret_from_exception 29.globl ret_from_exception
30.globl ret_from_signal 30.globl ret_from_signal
31.globl sys_call_table 31.globl sys_call_table
32.globl ret_from_interrupt
33.globl bad_interrupt 32.globl bad_interrupt
34.globl inthandler 33.globl inthandler
35 34
@@ -132,26 +131,9 @@ inthandler:
132 131
133 movel %sp,%sp@- 132 movel %sp,%sp@-
134 movel %d0,%sp@- /* put vector # on stack*/ 133 movel %d0,%sp@- /* put vector # on stack*/
135 jbsr do_IRQ /* process the IRQ*/ 134 jbsr do_IRQ /* process the IRQ */
1363: addql #8,%sp /* pop parameters off stack*/ 135 addql #8,%sp /* pop parameters off stack*/
137 bra ret_from_interrupt 136 jra ret_from_exception
138
139ret_from_interrupt:
140 jeq 1f
1412:
142 RESTORE_ALL
1431:
144 moveb %sp@(PT_OFF_SR), %d0
145 and #7, %d0
146 jhi 2b
147 /* check if we need to do software interrupts */
148
149 movel irq_stat+CPUSTAT_SOFTIRQ_PENDING,%d0
150 jeq ret_from_exception
151
152 pea ret_from_exception
153 jra do_softirq
154
155 137
156/* 138/*
157 * Handler for uninitialized and spurious interrupts. 139 * Handler for uninitialized and spurious interrupts.
diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h
index 7c4a33006142..b19e9c588a16 100644
--- a/arch/metag/include/asm/thread_info.h
+++ b/arch/metag/include/asm/thread_info.h
@@ -46,8 +46,6 @@ struct thread_info {
46 46
47#endif 47#endif
48 48
49#define PREEMPT_ACTIVE 0x10000000
50
51#ifdef CONFIG_4KSTACKS 49#ifdef CONFIG_4KSTACKS
52#define THREAD_SHIFT 12 50#define THREAD_SHIFT 12
53#else 51#else
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index de26ea6373de..8c9d36591a03 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -106,8 +106,6 @@ static inline struct thread_info *current_thread_info(void)
106/* thread information allocation */ 106/* thread information allocation */
107#endif /* __ASSEMBLY__ */ 107#endif /* __ASSEMBLY__ */
108 108
109#define PREEMPT_ACTIVE 0x10000000
110
111/* 109/*
112 * thread information flags 110 * thread information flags
113 * - these are process state flags that various assembly files may 111 * - these are process state flags that various assembly files may
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index f9b24bfbdbae..4f58ef6d0eed 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -92,8 +92,6 @@ static inline struct thread_info *current_thread_info(void)
92 92
93#define STACK_WARN (THREAD_SIZE / 8) 93#define STACK_WARN (THREAD_SIZE / 8)
94 94
95#define PREEMPT_ACTIVE 0x10000000
96
97/* 95/*
98 * thread information flags 96 * thread information flags
99 * - these are process state flags that various assembly files may need to 97 * - these are process state flags that various assembly files may need to
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 224b4262486d..bf280eaccd36 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -16,8 +16,6 @@
16 16
17#include <asm/page.h> 17#include <asm/page.h>
18 18
19#define PREEMPT_ACTIVE 0x10000000
20
21#ifdef CONFIG_4KSTACKS 19#ifdef CONFIG_4KSTACKS
22#define THREAD_SIZE (4096) 20#define THREAD_SIZE (4096)
23#define THREAD_SIZE_ORDER (0) 21#define THREAD_SIZE_ORDER (0)
diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
index ec1b014952b6..acacd348df89 100644
--- a/arch/parisc/configs/c3000_defconfig
+++ b/arch/parisc/configs/c3000_defconfig
@@ -50,7 +50,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
50CONFIG_IDE=y 50CONFIG_IDE=y
51CONFIG_BLK_DEV_IDECD=y 51CONFIG_BLK_DEV_IDECD=y
52CONFIG_BLK_DEV_NS87415=y 52CONFIG_BLK_DEV_NS87415=y
53CONFIG_BLK_DEV_SIIMAGE=m 53CONFIG_PATA_SIL680=m
54CONFIG_SCSI=y 54CONFIG_SCSI=y
55CONFIG_BLK_DEV_SD=y 55CONFIG_BLK_DEV_SD=y
56CONFIG_CHR_DEV_ST=y 56CONFIG_CHR_DEV_ST=y
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
index e1c8d2015c89..8249ac9d9cfc 100644
--- a/arch/parisc/configs/c8000_defconfig
+++ b/arch/parisc/configs/c8000_defconfig
@@ -20,7 +20,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
20CONFIG_MODVERSIONS=y 20CONFIG_MODVERSIONS=y
21CONFIG_BLK_DEV_INTEGRITY=y 21CONFIG_BLK_DEV_INTEGRITY=y
22CONFIG_PA8X00=y 22CONFIG_PA8X00=y
23CONFIG_MLONGCALLS=y
24CONFIG_64BIT=y 23CONFIG_64BIT=y
25CONFIG_SMP=y 24CONFIG_SMP=y
26CONFIG_PREEMPT=y 25CONFIG_PREEMPT=y
@@ -81,8 +80,6 @@ CONFIG_IDE=y
81CONFIG_BLK_DEV_IDECD=y 80CONFIG_BLK_DEV_IDECD=y
82CONFIG_BLK_DEV_PLATFORM=y 81CONFIG_BLK_DEV_PLATFORM=y
83CONFIG_BLK_DEV_GENERIC=y 82CONFIG_BLK_DEV_GENERIC=y
84CONFIG_BLK_DEV_SIIMAGE=y
85CONFIG_SCSI=y
86CONFIG_BLK_DEV_SD=y 83CONFIG_BLK_DEV_SD=y
87CONFIG_CHR_DEV_ST=m 84CONFIG_CHR_DEV_ST=m
88CONFIG_BLK_DEV_SR=m 85CONFIG_BLK_DEV_SR=m
@@ -94,6 +91,8 @@ CONFIG_SCSI_FC_ATTRS=y
94CONFIG_SCSI_SAS_LIBSAS=m 91CONFIG_SCSI_SAS_LIBSAS=m
95CONFIG_ISCSI_TCP=m 92CONFIG_ISCSI_TCP=m
96CONFIG_ISCSI_BOOT_SYSFS=m 93CONFIG_ISCSI_BOOT_SYSFS=m
94CONFIG_ATA=y
95CONFIG_PATA_SIL680=y
97CONFIG_FUSION=y 96CONFIG_FUSION=y
98CONFIG_FUSION_SPI=y 97CONFIG_FUSION_SPI=y
99CONFIG_FUSION_SAS=y 98CONFIG_FUSION_SAS=y
@@ -114,9 +113,8 @@ CONFIG_INPUT_FF_MEMLESS=m
114# CONFIG_KEYBOARD_ATKBD is not set 113# CONFIG_KEYBOARD_ATKBD is not set
115# CONFIG_KEYBOARD_HIL_OLD is not set 114# CONFIG_KEYBOARD_HIL_OLD is not set
116# CONFIG_KEYBOARD_HIL is not set 115# CONFIG_KEYBOARD_HIL is not set
117CONFIG_MOUSE_PS2=m 116# CONFIG_MOUSE_PS2 is not set
118CONFIG_INPUT_MISC=y 117CONFIG_INPUT_MISC=y
119CONFIG_INPUT_CM109=m
120CONFIG_SERIO_SERPORT=m 118CONFIG_SERIO_SERPORT=m
121CONFIG_SERIO_PARKBD=m 119CONFIG_SERIO_PARKBD=m
122CONFIG_SERIO_GSCPS2=m 120CONFIG_SERIO_GSCPS2=m
@@ -167,34 +165,6 @@ CONFIG_SND_VERBOSE_PRINTK=y
167CONFIG_SND_AD1889=m 165CONFIG_SND_AD1889=m
168# CONFIG_SND_USB is not set 166# CONFIG_SND_USB is not set
169# CONFIG_SND_GSC is not set 167# CONFIG_SND_GSC is not set
170CONFIG_HID_A4TECH=m
171CONFIG_HID_APPLE=m
172CONFIG_HID_BELKIN=m
173CONFIG_HID_CHERRY=m
174CONFIG_HID_CHICONY=m
175CONFIG_HID_CYPRESS=m
176CONFIG_HID_DRAGONRISE=m
177CONFIG_HID_EZKEY=m
178CONFIG_HID_KYE=m
179CONFIG_HID_GYRATION=m
180CONFIG_HID_TWINHAN=m
181CONFIG_HID_KENSINGTON=m
182CONFIG_HID_LOGITECH=m
183CONFIG_HID_LOGITECH_DJ=m
184CONFIG_HID_MICROSOFT=m
185CONFIG_HID_MONTEREY=m
186CONFIG_HID_NTRIG=m
187CONFIG_HID_ORTEK=m
188CONFIG_HID_PANTHERLORD=m
189CONFIG_HID_PETALYNX=m
190CONFIG_HID_SAMSUNG=m
191CONFIG_HID_SUNPLUS=m
192CONFIG_HID_GREENASIA=m
193CONFIG_HID_SMARTJOYPLUS=m
194CONFIG_HID_TOPSEED=m
195CONFIG_HID_THRUSTMASTER=m
196CONFIG_HID_ZEROPLUS=m
197CONFIG_USB_HID=m
198CONFIG_USB=y 168CONFIG_USB=y
199CONFIG_USB_OHCI_HCD=y 169CONFIG_USB_OHCI_HCD=y
200CONFIG_USB_STORAGE=y 170CONFIG_USB_STORAGE=y
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
index 5874cebee077..28c1b5de044e 100644
--- a/arch/parisc/configs/generic-64bit_defconfig
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -24,7 +24,6 @@ CONFIG_MODVERSIONS=y
24CONFIG_BLK_DEV_INTEGRITY=y 24CONFIG_BLK_DEV_INTEGRITY=y
25# CONFIG_IOSCHED_DEADLINE is not set 25# CONFIG_IOSCHED_DEADLINE is not set
26CONFIG_PA8X00=y 26CONFIG_PA8X00=y
27CONFIG_MLONGCALLS=y
28CONFIG_64BIT=y 27CONFIG_64BIT=y
29CONFIG_SMP=y 28CONFIG_SMP=y
30# CONFIG_COMPACTION is not set 29# CONFIG_COMPACTION is not set
@@ -68,7 +67,6 @@ CONFIG_IDE_GD=m
68CONFIG_IDE_GD_ATAPI=y 67CONFIG_IDE_GD_ATAPI=y
69CONFIG_BLK_DEV_IDECD=m 68CONFIG_BLK_DEV_IDECD=m
70CONFIG_BLK_DEV_NS87415=y 69CONFIG_BLK_DEV_NS87415=y
71CONFIG_BLK_DEV_SIIMAGE=y
72# CONFIG_SCSI_PROC_FS is not set 70# CONFIG_SCSI_PROC_FS is not set
73CONFIG_BLK_DEV_SD=y 71CONFIG_BLK_DEV_SD=y
74CONFIG_BLK_DEV_SR=y 72CONFIG_BLK_DEV_SR=y
@@ -82,6 +80,7 @@ CONFIG_SCSI_ZALON=y
82CONFIG_SCSI_QLA_ISCSI=m 80CONFIG_SCSI_QLA_ISCSI=m
83CONFIG_SCSI_DH=y 81CONFIG_SCSI_DH=y
84CONFIG_ATA=y 82CONFIG_ATA=y
83CONFIG_PATA_SIL680=y
85CONFIG_ATA_GENERIC=y 84CONFIG_ATA_GENERIC=y
86CONFIG_MD=y 85CONFIG_MD=y
87CONFIG_MD_LINEAR=m 86CONFIG_MD_LINEAR=m
@@ -162,7 +161,7 @@ CONFIG_SLIP_MODE_SLIP6=y
162CONFIG_INPUT_EVDEV=y 161CONFIG_INPUT_EVDEV=y
163# CONFIG_KEYBOARD_HIL_OLD is not set 162# CONFIG_KEYBOARD_HIL_OLD is not set
164# CONFIG_KEYBOARD_HIL is not set 163# CONFIG_KEYBOARD_HIL is not set
165# CONFIG_INPUT_MOUSE is not set 164# CONFIG_MOUSE_PS2 is not set
166CONFIG_INPUT_MISC=y 165CONFIG_INPUT_MISC=y
167CONFIG_SERIO_SERPORT=m 166CONFIG_SERIO_SERPORT=m
168# CONFIG_HP_SDC is not set 167# CONFIG_HP_SDC is not set
@@ -216,32 +215,7 @@ CONFIG_BACKLIGHT_LCD_SUPPORT=y
216CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y 215CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
217CONFIG_LOGO=y 216CONFIG_LOGO=y
218# CONFIG_LOGO_LINUX_MONO is not set 217# CONFIG_LOGO_LINUX_MONO is not set
219CONFIG_HID=m
220CONFIG_HIDRAW=y 218CONFIG_HIDRAW=y
221CONFIG_HID_DRAGONRISE=m
222CONFIG_DRAGONRISE_FF=y
223CONFIG_HID_KYE=m
224CONFIG_HID_GYRATION=m
225CONFIG_HID_TWINHAN=m
226CONFIG_LOGITECH_FF=y
227CONFIG_LOGIRUMBLEPAD2_FF=y
228CONFIG_HID_NTRIG=m
229CONFIG_HID_PANTHERLORD=m
230CONFIG_PANTHERLORD_FF=y
231CONFIG_HID_PETALYNX=m
232CONFIG_HID_SAMSUNG=m
233CONFIG_HID_SONY=m
234CONFIG_HID_SUNPLUS=m
235CONFIG_HID_GREENASIA=m
236CONFIG_GREENASIA_FF=y
237CONFIG_HID_SMARTJOYPLUS=m
238CONFIG_SMARTJOYPLUS_FF=y
239CONFIG_HID_TOPSEED=m
240CONFIG_HID_THRUSTMASTER=m
241CONFIG_THRUSTMASTER_FF=y
242CONFIG_HID_ZEROPLUS=m
243CONFIG_ZEROPLUS_FF=y
244CONFIG_USB_HID=m
245CONFIG_HID_PID=y 219CONFIG_HID_PID=y
246CONFIG_USB_HIDDEV=y 220CONFIG_USB_HIDDEV=y
247CONFIG_USB=y 221CONFIG_USB=y
@@ -251,13 +225,8 @@ CONFIG_USB_DYNAMIC_MINORS=y
251CONFIG_USB_MON=m 225CONFIG_USB_MON=m
252CONFIG_USB_WUSB_CBAF=m 226CONFIG_USB_WUSB_CBAF=m
253CONFIG_USB_XHCI_HCD=m 227CONFIG_USB_XHCI_HCD=m
254CONFIG_USB_EHCI_HCD=m 228CONFIG_USB_EHCI_HCD=y
255CONFIG_USB_OHCI_HCD=m 229CONFIG_USB_OHCI_HCD=y
256CONFIG_USB_R8A66597_HCD=m
257CONFIG_USB_ACM=m
258CONFIG_USB_PRINTER=m
259CONFIG_USB_WDM=m
260CONFIG_USB_TMC=m
261CONFIG_NEW_LEDS=y 230CONFIG_NEW_LEDS=y
262CONFIG_LEDS_CLASS=y 231CONFIG_LEDS_CLASS=y
263CONFIG_LEDS_TRIGGERS=y 232CONFIG_LEDS_TRIGGERS=y
diff --git a/arch/parisc/include/asm/serial.h b/arch/parisc/include/asm/serial.h
index d7e3cc60dbc3..77e9b67c87ee 100644
--- a/arch/parisc/include/asm/serial.h
+++ b/arch/parisc/include/asm/serial.h
@@ -6,5 +6,3 @@
6 * This is used for 16550-compatible UARTs 6 * This is used for 16550-compatible UARTs
7 */ 7 */
8#define BASE_BAUD ( 1843200 / 16 ) 8#define BASE_BAUD ( 1843200 / 16 )
9
10#define SERIAL_PORT_DFNS
diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h
new file mode 100644
index 000000000000..748016cb122d
--- /dev/null
+++ b/arch/parisc/include/asm/socket.h
@@ -0,0 +1,11 @@
1#ifndef _ASM_SOCKET_H
2#define _ASM_SOCKET_H
3
4#include <uapi/asm/socket.h>
5
6/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
7 * have to define SOCK_NONBLOCK to a different value here.
8 */
9#define SOCK_NONBLOCK 0x40000000
10
11#endif /* _ASM_SOCKET_H */
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index bc7cf120106b..d5f97ea3a4e1 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -46,9 +46,6 @@ struct thread_info {
46#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) 46#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
47#define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER) 47#define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER)
48 48
49#define PREEMPT_ACTIVE_BIT 28
50#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT)
51
52/* 49/*
53 * thread information flags 50 * thread information flags
54 */ 51 */
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 63f4dd0b49c2..4006964d8e12 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -4,14 +4,11 @@
4/* 4/*
5 * User space memory access functions 5 * User space memory access functions
6 */ 6 */
7#include <asm/processor.h>
8#include <asm/page.h> 7#include <asm/page.h>
9#include <asm/cache.h> 8#include <asm/cache.h>
10#include <asm/errno.h> 9#include <asm/errno.h>
11#include <asm-generic/uaccess-unaligned.h> 10#include <asm-generic/uaccess-unaligned.h>
12 11
13#include <linux/sched.h>
14
15#define VERIFY_READ 0 12#define VERIFY_READ 0
16#define VERIFY_WRITE 1 13#define VERIFY_WRITE 1
17 14
@@ -36,43 +33,12 @@ extern int __get_user_bad(void);
36extern int __put_kernel_bad(void); 33extern int __put_kernel_bad(void);
37extern int __put_user_bad(void); 34extern int __put_user_bad(void);
38 35
39 36static inline long access_ok(int type, const void __user * addr,
40/* 37 unsigned long size)
41 * Test whether a block of memory is a valid user space address.
42 * Returns 0 if the range is valid, nonzero otherwise.
43 */
44static inline int __range_not_ok(unsigned long addr, unsigned long size,
45 unsigned long limit)
46{ 38{
47 unsigned long __newaddr = addr + size; 39 return 1;
48 return (__newaddr < addr || __newaddr > limit || size > limit);
49} 40}
50 41
51/**
52 * access_ok: - Checks if a user space pointer is valid
53 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
54 * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
55 * to write to a block, it is always safe to read from it.
56 * @addr: User space pointer to start of block to check
57 * @size: Size of block to check
58 *
59 * Context: User context only. This function may sleep.
60 *
61 * Checks if a pointer to a block of memory in user space is valid.
62 *
63 * Returns true (nonzero) if the memory block may be valid, false (zero)
64 * if it is definitely invalid.
65 *
66 * Note that, depending on architecture, this function probably just
67 * checks that the pointer is in the user space range - after calling
68 * this function, memory access functions may still return -EFAULT.
69 */
70#define access_ok(type, addr, size) \
71( __chk_user_ptr(addr), \
72 !__range_not_ok((unsigned long) (__force void *) (addr), \
73 size, user_addr_max()) \
74)
75
76#define put_user __put_user 42#define put_user __put_user
77#define get_user __get_user 43#define get_user __get_user
78 44
@@ -253,11 +219,7 @@ extern long lstrnlen_user(const char __user *,long);
253/* 219/*
254 * Complex access routines -- macros 220 * Complex access routines -- macros
255 */ 221 */
256#ifdef CONFIG_COMPAT 222#define user_addr_max() (~0UL)
257#define user_addr_max() (TASK_SIZE)
258#else
259#define user_addr_max() (DEFAULT_TASK_SIZE)
260#endif
261 223
262#define strnlen_user lstrnlen_user 224#define strnlen_user lstrnlen_user
263#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL) 225#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 7c614d01f1fa..f33113a6141e 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_SOCKET_H 1#ifndef _UAPI_ASM_SOCKET_H
2#define _ASM_SOCKET_H 2#define _UAPI_ASM_SOCKET_H
3 3
4#include <asm/sockios.h> 4#include <asm/sockios.h>
5 5
@@ -77,9 +77,4 @@
77 77
78#define SO_MAX_PACING_RATE 0x4048 78#define SO_MAX_PACING_RATE 0x4048
79 79
80/* O_NONBLOCK clashes with the bits used for socket types. Therefore we 80#endif /* _UAPI_ASM_SOCKET_H */
81 * have to define SOCK_NONBLOCK to a different value here.
82 */
83#define SOCK_NONBLOCK 0x40000000
84
85#endif /* _ASM_SOCKET_H */
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index 06cb3992907e..608716f8496b 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -36,6 +36,9 @@
36 * HP PARISC Hardware Database 36 * HP PARISC Hardware Database
37 * Access to this database is only possible during bootup 37 * Access to this database is only possible during bootup
38 * so don't reference this table after starting the init process 38 * so don't reference this table after starting the init process
39 *
40 * NOTE: Product names which are listed here and ends with a '?'
41 * are guessed. If you know the correct name, please let us know.
39 */ 42 */
40 43
41static struct hp_hardware hp_hardware_list[] = { 44static struct hp_hardware hp_hardware_list[] = {
@@ -222,7 +225,7 @@ static struct hp_hardware hp_hardware_list[] = {
222 {HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"}, 225 {HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"},
223 {HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"}, 226 {HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"},
224 {HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"}, 227 {HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"},
225 {HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+? (rp5470)"}, 228 {HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+ (rp5470)?"},
226 {HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"}, 229 {HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"},
227 {HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"}, 230 {HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"},
228 {HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"}, 231 {HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"},
@@ -276,9 +279,11 @@ static struct hp_hardware hp_hardware_list[] = {
276 {HPHW_NPROC,0x888,0x4,0x91,"Storm Peak Fast DC-"}, 279 {HPHW_NPROC,0x888,0x4,0x91,"Storm Peak Fast DC-"},
277 {HPHW_NPROC,0x889,0x4,0x91,"Storm Peak Fast"}, 280 {HPHW_NPROC,0x889,0x4,0x91,"Storm Peak Fast"},
278 {HPHW_NPROC,0x88A,0x4,0x91,"Crestone Peak Slow"}, 281 {HPHW_NPROC,0x88A,0x4,0x91,"Crestone Peak Slow"},
282 {HPHW_NPROC,0x88B,0x4,0x91,"Crestone Peak Fast?"},
279 {HPHW_NPROC,0x88C,0x4,0x91,"Orca Mako+"}, 283 {HPHW_NPROC,0x88C,0x4,0x91,"Orca Mako+"},
280 {HPHW_NPROC,0x88D,0x4,0x91,"Rainier/Medel Mako+ Slow"}, 284 {HPHW_NPROC,0x88D,0x4,0x91,"Rainier/Medel Mako+ Slow"},
281 {HPHW_NPROC,0x88E,0x4,0x91,"Rainier/Medel Mako+ Fast"}, 285 {HPHW_NPROC,0x88E,0x4,0x91,"Rainier/Medel Mako+ Fast"},
286 {HPHW_NPROC,0x892,0x4,0x91,"Mt. Hamilton Slow Mako+?"},
282 {HPHW_NPROC,0x894,0x4,0x91,"Mt. Hamilton Fast Mako+"}, 287 {HPHW_NPROC,0x894,0x4,0x91,"Mt. Hamilton Fast Mako+"},
283 {HPHW_NPROC,0x895,0x4,0x91,"Storm Peak Slow Mako+"}, 288 {HPHW_NPROC,0x895,0x4,0x91,"Storm Peak Slow Mako+"},
284 {HPHW_NPROC,0x896,0x4,0x91,"Storm Peak Fast Mako+"}, 289 {HPHW_NPROC,0x896,0x4,0x91,"Storm Peak Fast Mako+"},
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index d2d58258aea6..d4dc588c0dc1 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -41,9 +41,7 @@ END(boot_args)
41 .import fault_vector_11,code /* IVA parisc 1.1 32 bit */ 41 .import fault_vector_11,code /* IVA parisc 1.1 32 bit */
42 .import $global$ /* forward declaration */ 42 .import $global$ /* forward declaration */
43#endif /*!CONFIG_64BIT*/ 43#endif /*!CONFIG_64BIT*/
44 .export _stext,data /* Kernel want it this way! */ 44ENTRY(parisc_kernel_start)
45_stext:
46ENTRY(stext)
47 .proc 45 .proc
48 .callinfo 46 .callinfo
49 47
@@ -347,7 +345,7 @@ smp_slave_stext:
347 .procend 345 .procend
348#endif /* CONFIG_SMP */ 346#endif /* CONFIG_SMP */
349 347
350ENDPROC(stext) 348ENDPROC(parisc_kernel_start)
351 349
352#ifndef CONFIG_64BIT 350#ifndef CONFIG_64BIT
353 .section .data..read_mostly 351 .section .data..read_mostly
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 5dfd248e3f1a..0d3a9d4927b5 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -61,8 +61,15 @@ static int get_offset(struct address_space *mapping)
61 return (unsigned long) mapping >> 8; 61 return (unsigned long) mapping >> 8;
62} 62}
63 63
64static unsigned long get_shared_area(struct address_space *mapping, 64static unsigned long shared_align_offset(struct file *filp, unsigned long pgoff)
65 unsigned long addr, unsigned long len, unsigned long pgoff) 65{
66 struct address_space *mapping = filp ? filp->f_mapping : NULL;
67
68 return (get_offset(mapping) + pgoff) << PAGE_SHIFT;
69}
70
71static unsigned long get_shared_area(struct file *filp, unsigned long addr,
72 unsigned long len, unsigned long pgoff)
66{ 73{
67 struct vm_unmapped_area_info info; 74 struct vm_unmapped_area_info info;
68 75
@@ -71,7 +78,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
71 info.low_limit = PAGE_ALIGN(addr); 78 info.low_limit = PAGE_ALIGN(addr);
72 info.high_limit = TASK_SIZE; 79 info.high_limit = TASK_SIZE;
73 info.align_mask = PAGE_MASK & (SHMLBA - 1); 80 info.align_mask = PAGE_MASK & (SHMLBA - 1);
74 info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT; 81 info.align_offset = shared_align_offset(filp, pgoff);
75 return vm_unmapped_area(&info); 82 return vm_unmapped_area(&info);
76} 83}
77 84
@@ -82,20 +89,18 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
82 return -ENOMEM; 89 return -ENOMEM;
83 if (flags & MAP_FIXED) { 90 if (flags & MAP_FIXED) {
84 if ((flags & MAP_SHARED) && 91 if ((flags & MAP_SHARED) &&
85 (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) 92 (addr - shared_align_offset(filp, pgoff)) & (SHMLBA - 1))
86 return -EINVAL; 93 return -EINVAL;
87 return addr; 94 return addr;
88 } 95 }
89 if (!addr) 96 if (!addr)
90 addr = TASK_UNMAPPED_BASE; 97 addr = TASK_UNMAPPED_BASE;
91 98
92 if (filp) { 99 if (filp || (flags & MAP_SHARED))
93 addr = get_shared_area(filp->f_mapping, addr, len, pgoff); 100 addr = get_shared_area(filp, addr, len, pgoff);
94 } else if(flags & MAP_SHARED) { 101 else
95 addr = get_shared_area(NULL, addr, len, pgoff);
96 } else {
97 addr = get_unshared_area(addr, len); 102 addr = get_unshared_area(addr, len);
98 } 103
99 return addr; 104 return addr;
100} 105}
101 106
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 76ed62ed785b..ddd988b267a9 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -168,7 +168,7 @@ void unwind_table_remove(struct unwind_table *table)
168} 168}
169 169
170/* Called from setup_arch to import the kernel unwind info */ 170/* Called from setup_arch to import the kernel unwind info */
171int unwind_init(void) 171int __init unwind_init(void)
172{ 172{
173 long start, stop; 173 long start, stop;
174 register unsigned long gp __asm__ ("r27"); 174 register unsigned long gp __asm__ ("r27");
@@ -233,7 +233,6 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
233 e = find_unwind_entry(info->ip); 233 e = find_unwind_entry(info->ip);
234 if (e == NULL) { 234 if (e == NULL) {
235 unsigned long sp; 235 unsigned long sp;
236 extern char _stext[], _etext[];
237 236
238 dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip); 237 dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
239 238
@@ -281,8 +280,7 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
281 break; 280 break;
282 info->prev_ip = tmp; 281 info->prev_ip = tmp;
283 sp = info->prev_sp; 282 sp = info->prev_sp;
284 } while (info->prev_ip < (unsigned long)_stext || 283 } while (!kernel_text_address(info->prev_ip));
285 info->prev_ip > (unsigned long)_etext);
286 284
287 info->rp = 0; 285 info->rp = 0;
288 286
@@ -435,9 +433,8 @@ unsigned long return_address(unsigned int level)
435 do { 433 do {
436 if (unwind_once(&info) < 0 || info.ip == 0) 434 if (unwind_once(&info) < 0 || info.ip == 0)
437 return 0; 435 return 0;
438 if (!__kernel_text_address(info.ip)) { 436 if (!kernel_text_address(info.ip))
439 return 0; 437 return 0;
440 }
441 } while (info.ip && level--); 438 } while (info.ip && level--);
442 439
443 return info.ip; 440 return info.ip;
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 4bb095a2f6fc..0dacc5ca555a 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -6,24 +6,19 @@
6 * Copyright (C) 2000 Michael Ang <mang with subcarrier.org> 6 * Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
7 * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org> 7 * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
8 * Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org> 8 * Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
9 * Copyright (C) 2006 Helge Deller <deller@gmx.de> 9 * Copyright (C) 2006-2013 Helge Deller <deller@gmx.de>
10 * 10 */
11 * 11
12 * This program is free software; you can redistribute it and/or modify 12/*
13 * it under the terms of the GNU General Public License as published by 13 * Put page table entries (swapper_pg_dir) as the first thing in .bss. This
14 * the Free Software Foundation; either version 2 of the License, or 14 * will ensure that it has .bss alignment (PAGE_SIZE).
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */ 15 */
16#define BSS_FIRST_SECTIONS *(.data..vm0.pmd) \
17 *(.data..vm0.pgd) \
18 *(.data..vm0.pte)
19
26#include <asm-generic/vmlinux.lds.h> 20#include <asm-generic/vmlinux.lds.h>
21
27/* needed for the processor specific cache alignment size */ 22/* needed for the processor specific cache alignment size */
28#include <asm/cache.h> 23#include <asm/cache.h>
29#include <asm/page.h> 24#include <asm/page.h>
@@ -39,7 +34,7 @@ OUTPUT_FORMAT("elf64-hppa-linux")
39OUTPUT_ARCH(hppa:hppa2.0w) 34OUTPUT_ARCH(hppa:hppa2.0w)
40#endif 35#endif
41 36
42ENTRY(_stext) 37ENTRY(parisc_kernel_start)
43#ifndef CONFIG_64BIT 38#ifndef CONFIG_64BIT
44jiffies = jiffies_64 + 4; 39jiffies = jiffies_64 + 4;
45#else 40#else
@@ -49,11 +44,29 @@ SECTIONS
49{ 44{
50 . = KERNEL_BINARY_TEXT_START; 45 . = KERNEL_BINARY_TEXT_START;
51 46
47 __init_begin = .;
48 HEAD_TEXT_SECTION
49 INIT_TEXT_SECTION(8)
50
51 . = ALIGN(PAGE_SIZE);
52 INIT_DATA_SECTION(PAGE_SIZE)
53 /* we have to discard exit text and such at runtime, not link time */
54 .exit.text :
55 {
56 EXIT_TEXT
57 }
58 .exit.data :
59 {
60 EXIT_DATA
61 }
62 PERCPU_SECTION(8)
63 . = ALIGN(PAGE_SIZE);
64 __init_end = .;
65 /* freed after init ends here */
66
52 _text = .; /* Text and read-only data */ 67 _text = .; /* Text and read-only data */
53 .head ALIGN(16) : { 68 _stext = .;
54 HEAD_TEXT 69 .text ALIGN(PAGE_SIZE) : {
55 } = 0
56 .text ALIGN(16) : {
57 TEXT_TEXT 70 TEXT_TEXT
58 SCHED_TEXT 71 SCHED_TEXT
59 LOCK_TEXT 72 LOCK_TEXT
@@ -68,21 +81,28 @@ SECTIONS
68 *(.lock.text) /* out-of-line lock text */ 81 *(.lock.text) /* out-of-line lock text */
69 *(.gnu.warning) 82 *(.gnu.warning)
70 } 83 }
71 /* End of text section */ 84 . = ALIGN(PAGE_SIZE);
72 _etext = .; 85 _etext = .;
86 /* End of text section */
73 87
74 /* Start of data section */ 88 /* Start of data section */
75 _sdata = .; 89 _sdata = .;
76 90
77 RODATA 91 RO_DATA_SECTION(8)
78 92
79 /* writeable */ 93#ifdef CONFIG_64BIT
80 /* Make sure this is page aligned so 94 . = ALIGN(16);
81 * that we can properly leave these 95 /* Linkage tables */
82 * as writable 96 .opd : {
83 */ 97 *(.opd)
84 . = ALIGN(PAGE_SIZE); 98 } PROVIDE (__gp = .);
85 data_start = .; 99 .plt : {
100 *(.plt)
101 }
102 .dlt : {
103 *(.dlt)
104 }
105#endif
86 106
87 /* unwind info */ 107 /* unwind info */
88 .PARISC.unwind : { 108 .PARISC.unwind : {
@@ -91,7 +111,15 @@ SECTIONS
91 __stop___unwind = .; 111 __stop___unwind = .;
92 } 112 }
93 113
94 EXCEPTION_TABLE(16) 114 /* writeable */
115 /* Make sure this is page aligned so
116 * that we can properly leave these
117 * as writable
118 */
119 . = ALIGN(PAGE_SIZE);
120 data_start = .;
121
122 EXCEPTION_TABLE(8)
95 NOTES 123 NOTES
96 124
97 /* Data */ 125 /* Data */
@@ -107,54 +135,8 @@ SECTIONS
107 _edata = .; 135 _edata = .;
108 136
109 /* BSS */ 137 /* BSS */
110 __bss_start = .; 138 BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8)
111 /* page table entries need to be PAGE_SIZE aligned */
112 . = ALIGN(PAGE_SIZE);
113 .data..vmpages : {
114 *(.data..vm0.pmd)
115 *(.data..vm0.pgd)
116 *(.data..vm0.pte)
117 }
118 .bss : {
119 *(.bss)
120 *(COMMON)
121 }
122 __bss_stop = .;
123
124#ifdef CONFIG_64BIT
125 . = ALIGN(16);
126 /* Linkage tables */
127 .opd : {
128 *(.opd)
129 } PROVIDE (__gp = .);
130 .plt : {
131 *(.plt)
132 }
133 .dlt : {
134 *(.dlt)
135 }
136#endif
137 139
138 /* reserve space for interrupt stack by aligning __init* to 16k */
139 . = ALIGN(16384);
140 __init_begin = .;
141 INIT_TEXT_SECTION(16384)
142 . = ALIGN(PAGE_SIZE);
143 INIT_DATA_SECTION(16)
144 /* we have to discard exit text and such at runtime, not link time */
145 .exit.text :
146 {
147 EXIT_TEXT
148 }
149 .exit.data :
150 {
151 EXIT_DATA
152 }
153
154 PERCPU_SECTION(L1_CACHE_BYTES)
155 . = ALIGN(PAGE_SIZE);
156 __init_end = .;
157 /* freed after init ends here */
158 _end = . ; 140 _end = . ;
159 141
160 STABS_DEBUG 142 STABS_DEBUG
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index b5507ec06b84..413dc1769299 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -161,7 +161,7 @@ static inline void prefetch_dst(const void *addr)
161/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words 161/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
162 * per loop. This code is derived from glibc. 162 * per loop. This code is derived from glibc.
163 */ 163 */
164static inline unsigned long copy_dstaligned(unsigned long dst, 164static noinline unsigned long copy_dstaligned(unsigned long dst,
165 unsigned long src, unsigned long len) 165 unsigned long src, unsigned long len)
166{ 166{
167 /* gcc complains that a2 and a3 may be uninitialized, but actually 167 /* gcc complains that a2 and a3 may be uninitialized, but actually
@@ -276,7 +276,7 @@ handle_store_error:
276/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR. 276/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
277 * In case of an access fault the faulty address can be read from the per_cpu 277 * In case of an access fault the faulty address can be read from the per_cpu
278 * exception data struct. */ 278 * exception data struct. */
279static unsigned long pa_memcpy_internal(void *dstp, const void *srcp, 279static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
280 unsigned long len) 280 unsigned long len)
281{ 281{
282 register unsigned long src, dst, t1, t2, t3; 282 register unsigned long src, dst, t1, t2, t3;
@@ -529,7 +529,7 @@ long probe_kernel_read(void *dst, const void *src, size_t size)
529{ 529{
530 unsigned long addr = (unsigned long)src; 530 unsigned long addr = (unsigned long)src;
531 531
532 if (size < 0 || addr < PAGE_SIZE) 532 if (addr < PAGE_SIZE)
533 return -EFAULT; 533 return -EFAULT;
534 534
535 /* check for I/O space F_EXTEND(0xfff00000) access as well? */ 535 /* check for I/O space F_EXTEND(0xfff00000) access as well? */
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 7584a5df0fa4..9d08c71a967e 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -282,16 +282,34 @@ bad_area:
282#endif 282#endif
283 switch (code) { 283 switch (code) {
284 case 15: /* Data TLB miss fault/Data page fault */ 284 case 15: /* Data TLB miss fault/Data page fault */
285 /* send SIGSEGV when outside of vma */
286 if (!vma ||
287 address < vma->vm_start || address > vma->vm_end) {
288 si.si_signo = SIGSEGV;
289 si.si_code = SEGV_MAPERR;
290 break;
291 }
292
293 /* send SIGSEGV for wrong permissions */
294 if ((vma->vm_flags & acc_type) != acc_type) {
295 si.si_signo = SIGSEGV;
296 si.si_code = SEGV_ACCERR;
297 break;
298 }
299
300 /* probably address is outside of mapped file */
301 /* fall through */
285 case 17: /* NA data TLB miss / page fault */ 302 case 17: /* NA data TLB miss / page fault */
286 case 18: /* Unaligned access - PCXS only */ 303 case 18: /* Unaligned access - PCXS only */
287 si.si_signo = SIGBUS; 304 si.si_signo = SIGBUS;
288 si.si_code = BUS_ADRERR; 305 si.si_code = (code == 18) ? BUS_ADRALN : BUS_ADRERR;
289 break; 306 break;
290 case 16: /* Non-access instruction TLB miss fault */ 307 case 16: /* Non-access instruction TLB miss fault */
291 case 26: /* PCXL: Data memory access rights trap */ 308 case 26: /* PCXL: Data memory access rights trap */
292 default: 309 default:
293 si.si_signo = SIGSEGV; 310 si.si_signo = SIGSEGV;
294 si.si_code = SEGV_MAPERR; 311 si.si_code = (code == 26) ? SEGV_ACCERR : SEGV_MAPERR;
312 break;
295 } 313 }
296 si.si_errno = 0; 314 si.si_errno = 0;
297 si.si_addr = (void __user *) address; 315 si.si_addr = (void __user *) address;
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index b0f96c0e6316..96f8168cf4ec 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -32,6 +32,7 @@
32#include <asm/sections.h> 32#include <asm/sections.h>
33 33
34extern int data_start; 34extern int data_start;
35extern void parisc_kernel_start(void); /* Kernel entry point in head.S */
35 36
36#if PT_NLEVELS == 3 37#if PT_NLEVELS == 3
37/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout 38/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
@@ -324,8 +325,9 @@ static void __init setup_bootmem(void)
324 reserve_bootmem_node(NODE_DATA(0), 0UL, 325 reserve_bootmem_node(NODE_DATA(0), 0UL,
325 (unsigned long)(PAGE0->mem_free + 326 (unsigned long)(PAGE0->mem_free +
326 PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT); 327 PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
327 reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text), 328 reserve_bootmem_node(NODE_DATA(0), __pa(KERNEL_BINARY_TEXT_START),
328 (unsigned long)(_end - _text), BOOTMEM_DEFAULT); 329 (unsigned long)(_end - KERNEL_BINARY_TEXT_START),
330 BOOTMEM_DEFAULT);
329 reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT), 331 reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
330 ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT), 332 ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
331 BOOTMEM_DEFAULT); 333 BOOTMEM_DEFAULT);
@@ -378,6 +380,17 @@ static void __init setup_bootmem(void)
378 request_resource(&sysram_resources[0], &pdcdata_resource); 380 request_resource(&sysram_resources[0], &pdcdata_resource);
379} 381}
380 382
383static int __init parisc_text_address(unsigned long vaddr)
384{
385 static unsigned long head_ptr __initdata;
386
387 if (!head_ptr)
388 head_ptr = PAGE_MASK & (unsigned long)
389 dereference_function_descriptor(&parisc_kernel_start);
390
391 return core_kernel_text(vaddr) || vaddr == head_ptr;
392}
393
381static void __init map_pages(unsigned long start_vaddr, 394static void __init map_pages(unsigned long start_vaddr,
382 unsigned long start_paddr, unsigned long size, 395 unsigned long start_paddr, unsigned long size,
383 pgprot_t pgprot, int force) 396 pgprot_t pgprot, int force)
@@ -466,7 +479,7 @@ static void __init map_pages(unsigned long start_vaddr,
466 */ 479 */
467 if (force) 480 if (force)
468 pte = __mk_pte(address, pgprot); 481 pte = __mk_pte(address, pgprot);
469 else if (core_kernel_text(vaddr) && 482 else if (parisc_text_address(vaddr) &&
470 address != fv_addr) 483 address != fv_addr)
471 pte = __mk_pte(address, PAGE_KERNEL_EXEC); 484 pte = __mk_pte(address, PAGE_KERNEL_EXEC);
472 else 485 else
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 607acf54a425..0f4344e6fbca 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -75,8 +75,10 @@ LDEMULATION := lppc
75GNUTARGET := powerpcle 75GNUTARGET := powerpcle
76MULTIPLEWORD := -mno-multiple 76MULTIPLEWORD := -mno-multiple
77else 77else
78ifeq ($(call cc-option-yn,-mbig-endian),y)
78override CC += -mbig-endian 79override CC += -mbig-endian
79override AS += -mbig-endian 80override AS += -mbig-endian
81endif
80override LD += -EB 82override LD += -EB
81LDEMULATION := ppc 83LDEMULATION := ppc
82GNUTARGET := powerpc 84GNUTARGET := powerpc
@@ -111,6 +113,7 @@ endif
111endif 113endif
112 114
113CFLAGS-$(CONFIG_PPC64) := -mtraceback=no -mcall-aixdesc 115CFLAGS-$(CONFIG_PPC64) := -mtraceback=no -mcall-aixdesc
116CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
114CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,-mminimal-toc) 117CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,-mminimal-toc)
115CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions) 118CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
116CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD) 119CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD)
@@ -127,7 +130,12 @@ CFLAGS-$(CONFIG_POWER5_CPU) += $(call cc-option,-mcpu=power5)
127CFLAGS-$(CONFIG_POWER6_CPU) += $(call cc-option,-mcpu=power6) 130CFLAGS-$(CONFIG_POWER6_CPU) += $(call cc-option,-mcpu=power6)
128CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7) 131CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7)
129 132
133# Altivec option not allowed with e500mc64 in GCC.
134ifeq ($(CONFIG_ALTIVEC),y)
135E5500_CPU := -mcpu=powerpc64
136else
130E5500_CPU := $(call cc-option,-mcpu=e500mc64,-mcpu=powerpc64) 137E5500_CPU := $(call cc-option,-mcpu=e500mc64,-mcpu=powerpc64)
138endif
131CFLAGS-$(CONFIG_E5500_CPU) += $(E5500_CPU) 139CFLAGS-$(CONFIG_E5500_CPU) += $(E5500_CPU)
132CFLAGS-$(CONFIG_E6500_CPU) += $(call cc-option,-mcpu=e6500,$(E5500_CPU)) 140CFLAGS-$(CONFIG_E6500_CPU) += $(call cc-option,-mcpu=e6500,$(E5500_CPU))
133 141
diff --git a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
index 4c617bf8cdb2..4f6e48277c46 100644
--- a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
@@ -223,13 +223,13 @@
223 reg = <0xe2000 0x1000>; 223 reg = <0xe2000 0x1000>;
224 }; 224 };
225 225
226/include/ "qoriq-dma-0.dtsi" 226/include/ "elo3-dma-0.dtsi"
227 dma@100300 { 227 dma@100300 {
228 fsl,iommu-parent = <&pamu0>; 228 fsl,iommu-parent = <&pamu0>;
229 fsl,liodn-reg = <&guts 0x580>; /* DMA1LIODNR */ 229 fsl,liodn-reg = <&guts 0x580>; /* DMA1LIODNR */
230 }; 230 };
231 231
232/include/ "qoriq-dma-1.dtsi" 232/include/ "elo3-dma-1.dtsi"
233 dma@101300 { 233 dma@101300 {
234 fsl,iommu-parent = <&pamu0>; 234 fsl,iommu-parent = <&pamu0>;
235 fsl,liodn-reg = <&guts 0x584>; /* DMA2LIODNR */ 235 fsl,liodn-reg = <&guts 0x584>; /* DMA2LIODNR */
diff --git a/arch/powerpc/boot/dts/fsl/elo3-dma-0.dtsi b/arch/powerpc/boot/dts/fsl/elo3-dma-0.dtsi
new file mode 100644
index 000000000000..3c210e0d5201
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/elo3-dma-0.dtsi
@@ -0,0 +1,82 @@
1/*
2 * QorIQ Elo3 DMA device tree stub [ controller @ offset 0x100000 ]
3 *
4 * Copyright 2013 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35dma0: dma@100300 {
36 #address-cells = <1>;
37 #size-cells = <1>;
38 compatible = "fsl,elo3-dma";
39 reg = <0x100300 0x4>,
40 <0x100600 0x4>;
41 ranges = <0x0 0x100100 0x500>;
42 dma-channel@0 {
43 compatible = "fsl,eloplus-dma-channel";
44 reg = <0x0 0x80>;
45 interrupts = <28 2 0 0>;
46 };
47 dma-channel@80 {
48 compatible = "fsl,eloplus-dma-channel";
49 reg = <0x80 0x80>;
50 interrupts = <29 2 0 0>;
51 };
52 dma-channel@100 {
53 compatible = "fsl,eloplus-dma-channel";
54 reg = <0x100 0x80>;
55 interrupts = <30 2 0 0>;
56 };
57 dma-channel@180 {
58 compatible = "fsl,eloplus-dma-channel";
59 reg = <0x180 0x80>;
60 interrupts = <31 2 0 0>;
61 };
62 dma-channel@300 {
63 compatible = "fsl,eloplus-dma-channel";
64 reg = <0x300 0x80>;
65 interrupts = <76 2 0 0>;
66 };
67 dma-channel@380 {
68 compatible = "fsl,eloplus-dma-channel";
69 reg = <0x380 0x80>;
70 interrupts = <77 2 0 0>;
71 };
72 dma-channel@400 {
73 compatible = "fsl,eloplus-dma-channel";
74 reg = <0x400 0x80>;
75 interrupts = <78 2 0 0>;
76 };
77 dma-channel@480 {
78 compatible = "fsl,eloplus-dma-channel";
79 reg = <0x480 0x80>;
80 interrupts = <79 2 0 0>;
81 };
82};
diff --git a/arch/powerpc/boot/dts/fsl/elo3-dma-1.dtsi b/arch/powerpc/boot/dts/fsl/elo3-dma-1.dtsi
new file mode 100644
index 000000000000..cccf3bb38224
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/elo3-dma-1.dtsi
@@ -0,0 +1,82 @@
1/*
2 * QorIQ Elo3 DMA device tree stub [ controller @ offset 0x101000 ]
3 *
4 * Copyright 2013 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35dma1: dma@101300 {
36 #address-cells = <1>;
37 #size-cells = <1>;
38 compatible = "fsl,elo3-dma";
39 reg = <0x101300 0x4>,
40 <0x101600 0x4>;
41 ranges = <0x0 0x101100 0x500>;
42 dma-channel@0 {
43 compatible = "fsl,eloplus-dma-channel";
44 reg = <0x0 0x80>;
45 interrupts = <32 2 0 0>;
46 };
47 dma-channel@80 {
48 compatible = "fsl,eloplus-dma-channel";
49 reg = <0x80 0x80>;
50 interrupts = <33 2 0 0>;
51 };
52 dma-channel@100 {
53 compatible = "fsl,eloplus-dma-channel";
54 reg = <0x100 0x80>;
55 interrupts = <34 2 0 0>;
56 };
57 dma-channel@180 {
58 compatible = "fsl,eloplus-dma-channel";
59 reg = <0x180 0x80>;
60 interrupts = <35 2 0 0>;
61 };
62 dma-channel@300 {
63 compatible = "fsl,eloplus-dma-channel";
64 reg = <0x300 0x80>;
65 interrupts = <80 2 0 0>;
66 };
67 dma-channel@380 {
68 compatible = "fsl,eloplus-dma-channel";
69 reg = <0x380 0x80>;
70 interrupts = <81 2 0 0>;
71 };
72 dma-channel@400 {
73 compatible = "fsl,eloplus-dma-channel";
74 reg = <0x400 0x80>;
75 interrupts = <82 2 0 0>;
76 };
77 dma-channel@480 {
78 compatible = "fsl,eloplus-dma-channel";
79 reg = <0x480 0x80>;
80 interrupts = <83 2 0 0>;
81 };
82};
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
index 510afa362de1..4143a9733cd0 100644
--- a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
@@ -387,8 +387,8 @@
387 reg = <0xea000 0x4000>; 387 reg = <0xea000 0x4000>;
388 }; 388 };
389 389
390/include/ "qoriq-dma-0.dtsi" 390/include/ "elo3-dma-0.dtsi"
391/include/ "qoriq-dma-1.dtsi" 391/include/ "elo3-dma-1.dtsi"
392 392
393/include/ "qoriq-espi-0.dtsi" 393/include/ "qoriq-espi-0.dtsi"
394 spi@110000 { 394 spi@110000 {
diff --git a/arch/powerpc/boot/dts/mpc5121.dtsi b/arch/powerpc/boot/dts/mpc5121.dtsi
index bd14c00e5146..2d7cb04ac962 100644
--- a/arch/powerpc/boot/dts/mpc5121.dtsi
+++ b/arch/powerpc/boot/dts/mpc5121.dtsi
@@ -77,7 +77,6 @@
77 compatible = "fsl,mpc5121-immr"; 77 compatible = "fsl,mpc5121-immr";
78 #address-cells = <1>; 78 #address-cells = <1>;
79 #size-cells = <1>; 79 #size-cells = <1>;
80 #interrupt-cells = <2>;
81 ranges = <0x0 0x80000000 0x400000>; 80 ranges = <0x0 0x80000000 0x400000>;
82 reg = <0x80000000 0x400000>; 81 reg = <0x80000000 0x400000>;
83 bus-frequency = <66000000>; /* 66 MHz ips bus */ 82 bus-frequency = <66000000>; /* 66 MHz ips bus */
diff --git a/arch/powerpc/boot/dts/xcalibur1501.dts b/arch/powerpc/boot/dts/xcalibur1501.dts
index cc00f4ddd9a7..c409cbafb126 100644
--- a/arch/powerpc/boot/dts/xcalibur1501.dts
+++ b/arch/powerpc/boot/dts/xcalibur1501.dts
@@ -637,14 +637,14 @@
637 tlu@2f000 { 637 tlu@2f000 {
638 compatible = "fsl,mpc8572-tlu", "fsl_tlu"; 638 compatible = "fsl,mpc8572-tlu", "fsl_tlu";
639 reg = <0x2f000 0x1000>; 639 reg = <0x2f000 0x1000>;
640 interupts = <61 2 >; 640 interrupts = <61 2>;
641 interrupt-parent = <&mpic>; 641 interrupt-parent = <&mpic>;
642 }; 642 };
643 643
644 tlu@15000 { 644 tlu@15000 {
645 compatible = "fsl,mpc8572-tlu", "fsl_tlu"; 645 compatible = "fsl,mpc8572-tlu", "fsl_tlu";
646 reg = <0x15000 0x1000>; 646 reg = <0x15000 0x1000>;
647 interupts = <75 2>; 647 interrupts = <75 2>;
648 interrupt-parent = <&mpic>; 648 interrupt-parent = <&mpic>;
649 }; 649 };
650 }; 650 };
diff --git a/arch/powerpc/boot/dts/xpedite5301.dts b/arch/powerpc/boot/dts/xpedite5301.dts
index 53c1c6a9752f..04cb410da48b 100644
--- a/arch/powerpc/boot/dts/xpedite5301.dts
+++ b/arch/powerpc/boot/dts/xpedite5301.dts
@@ -547,14 +547,14 @@
547 tlu@2f000 { 547 tlu@2f000 {
548 compatible = "fsl,mpc8572-tlu", "fsl_tlu"; 548 compatible = "fsl,mpc8572-tlu", "fsl_tlu";
549 reg = <0x2f000 0x1000>; 549 reg = <0x2f000 0x1000>;
550 interupts = <61 2 >; 550 interrupts = <61 2>;
551 interrupt-parent = <&mpic>; 551 interrupt-parent = <&mpic>;
552 }; 552 };
553 553
554 tlu@15000 { 554 tlu@15000 {
555 compatible = "fsl,mpc8572-tlu", "fsl_tlu"; 555 compatible = "fsl,mpc8572-tlu", "fsl_tlu";
556 reg = <0x15000 0x1000>; 556 reg = <0x15000 0x1000>;
557 interupts = <75 2>; 557 interrupts = <75 2>;
558 interrupt-parent = <&mpic>; 558 interrupt-parent = <&mpic>;
559 }; 559 };
560 }; 560 };
diff --git a/arch/powerpc/boot/dts/xpedite5330.dts b/arch/powerpc/boot/dts/xpedite5330.dts
index 215225983150..73f8620f1ce7 100644
--- a/arch/powerpc/boot/dts/xpedite5330.dts
+++ b/arch/powerpc/boot/dts/xpedite5330.dts
@@ -583,14 +583,14 @@
583 tlu@2f000 { 583 tlu@2f000 {
584 compatible = "fsl,mpc8572-tlu", "fsl_tlu"; 584 compatible = "fsl,mpc8572-tlu", "fsl_tlu";
585 reg = <0x2f000 0x1000>; 585 reg = <0x2f000 0x1000>;
586 interupts = <61 2 >; 586 interrupts = <61 2>;
587 interrupt-parent = <&mpic>; 587 interrupt-parent = <&mpic>;
588 }; 588 };
589 589
590 tlu@15000 { 590 tlu@15000 {
591 compatible = "fsl,mpc8572-tlu", "fsl_tlu"; 591 compatible = "fsl,mpc8572-tlu", "fsl_tlu";
592 reg = <0x15000 0x1000>; 592 reg = <0x15000 0x1000>;
593 interupts = <75 2>; 593 interrupts = <75 2>;
594 interrupt-parent = <&mpic>; 594 interrupt-parent = <&mpic>;
595 }; 595 };
596 }; 596 };
diff --git a/arch/powerpc/boot/dts/xpedite5370.dts b/arch/powerpc/boot/dts/xpedite5370.dts
index 11dbda10d756..cd0ea2b99362 100644
--- a/arch/powerpc/boot/dts/xpedite5370.dts
+++ b/arch/powerpc/boot/dts/xpedite5370.dts
@@ -545,14 +545,14 @@
545 tlu@2f000 { 545 tlu@2f000 {
546 compatible = "fsl,mpc8572-tlu", "fsl_tlu"; 546 compatible = "fsl,mpc8572-tlu", "fsl_tlu";
547 reg = <0x2f000 0x1000>; 547 reg = <0x2f000 0x1000>;
548 interupts = <61 2 >; 548 interrupts = <61 2>;
549 interrupt-parent = <&mpic>; 549 interrupt-parent = <&mpic>;
550 }; 550 };
551 551
552 tlu@15000 { 552 tlu@15000 {
553 compatible = "fsl,mpc8572-tlu", "fsl_tlu"; 553 compatible = "fsl,mpc8572-tlu", "fsl_tlu";
554 reg = <0x15000 0x1000>; 554 reg = <0x15000 0x1000>;
555 interupts = <75 2>; 555 interrupts = <75 2>;
556 interrupt-parent = <&mpic>; 556 interrupt-parent = <&mpic>;
557 }; 557 };
558 }; 558 };
diff --git a/arch/powerpc/boot/util.S b/arch/powerpc/boot/util.S
index 5143228e3e5f..6636b1d7821b 100644
--- a/arch/powerpc/boot/util.S
+++ b/arch/powerpc/boot/util.S
@@ -71,18 +71,32 @@ udelay:
71 add r4,r4,r5 71 add r4,r4,r5
72 addi r4,r4,-1 72 addi r4,r4,-1
73 divw r4,r4,r5 /* BUS ticks */ 73 divw r4,r4,r5 /* BUS ticks */
74#ifdef CONFIG_8xx
751: mftbu r5
76 mftb r6
77 mftbu r7
78#else
741: mfspr r5, SPRN_TBRU 791: mfspr r5, SPRN_TBRU
75 mfspr r6, SPRN_TBRL 80 mfspr r6, SPRN_TBRL
76 mfspr r7, SPRN_TBRU 81 mfspr r7, SPRN_TBRU
82#endif
77 cmpw 0,r5,r7 83 cmpw 0,r5,r7
78 bne 1b /* Get [synced] base time */ 84 bne 1b /* Get [synced] base time */
79 addc r9,r6,r4 /* Compute end time */ 85 addc r9,r6,r4 /* Compute end time */
80 addze r8,r5 86 addze r8,r5
87#ifdef CONFIG_8xx
882: mftbu r5
89#else
812: mfspr r5, SPRN_TBRU 902: mfspr r5, SPRN_TBRU
91#endif
82 cmpw 0,r5,r8 92 cmpw 0,r5,r8
83 blt 2b 93 blt 2b
84 bgt 3f 94 bgt 3f
95#ifdef CONFIG_8xx
96 mftb r6
97#else
85 mfspr r6, SPRN_TBRL 98 mfspr r6, SPRN_TBRL
99#endif
86 cmpw 0,r6,r9 100 cmpw 0,r6,r9
87 blt 2b 101 blt 2b
883: blr 1023: blr
diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig
index 69b57daf402e..0b88c7b30bb9 100644
--- a/arch/powerpc/configs/52xx/cm5200_defconfig
+++ b/arch/powerpc/configs/52xx/cm5200_defconfig
@@ -12,7 +12,6 @@ CONFIG_EXPERT=y
12CONFIG_PPC_MPC52xx=y 12CONFIG_PPC_MPC52xx=y
13CONFIG_PPC_MPC5200_SIMPLE=y 13CONFIG_PPC_MPC5200_SIMPLE=y
14# CONFIG_PPC_PMAC is not set 14# CONFIG_PPC_PMAC is not set
15CONFIG_PPC_BESTCOMM=y
16CONFIG_SPARSE_IRQ=y 15CONFIG_SPARSE_IRQ=y
17CONFIG_PM=y 16CONFIG_PM=y
18# CONFIG_PCI is not set 17# CONFIG_PCI is not set
@@ -71,6 +70,8 @@ CONFIG_USB_DEVICEFS=y
71CONFIG_USB_OHCI_HCD=y 70CONFIG_USB_OHCI_HCD=y
72CONFIG_USB_OHCI_HCD_PPC_OF_BE=y 71CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
73CONFIG_USB_STORAGE=y 72CONFIG_USB_STORAGE=y
73CONFIG_DMADEVICES=y
74CONFIG_PPC_BESTCOMM=y
74CONFIG_EXT2_FS=y 75CONFIG_EXT2_FS=y
75CONFIG_EXT3_FS=y 76CONFIG_EXT3_FS=y
76# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 77# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig
index f3638ae0a627..104a332e79ab 100644
--- a/arch/powerpc/configs/52xx/lite5200b_defconfig
+++ b/arch/powerpc/configs/52xx/lite5200b_defconfig
@@ -15,7 +15,6 @@ CONFIG_PPC_MPC52xx=y
15CONFIG_PPC_MPC5200_SIMPLE=y 15CONFIG_PPC_MPC5200_SIMPLE=y
16CONFIG_PPC_LITE5200=y 16CONFIG_PPC_LITE5200=y
17# CONFIG_PPC_PMAC is not set 17# CONFIG_PPC_PMAC is not set
18CONFIG_PPC_BESTCOMM=y
19CONFIG_NO_HZ=y 18CONFIG_NO_HZ=y
20CONFIG_HIGH_RES_TIMERS=y 19CONFIG_HIGH_RES_TIMERS=y
21CONFIG_SPARSE_IRQ=y 20CONFIG_SPARSE_IRQ=y
@@ -59,6 +58,8 @@ CONFIG_I2C_CHARDEV=y
59CONFIG_I2C_MPC=y 58CONFIG_I2C_MPC=y
60# CONFIG_HWMON is not set 59# CONFIG_HWMON is not set
61CONFIG_VIDEO_OUTPUT_CONTROL=m 60CONFIG_VIDEO_OUTPUT_CONTROL=m
61CONFIG_DMADEVICES=y
62CONFIG_PPC_BESTCOMM=y
62CONFIG_EXT2_FS=y 63CONFIG_EXT2_FS=y
63CONFIG_EXT3_FS=y 64CONFIG_EXT3_FS=y
64# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 65# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig
index 0c7de9620ea6..0d13ad7e4478 100644
--- a/arch/powerpc/configs/52xx/motionpro_defconfig
+++ b/arch/powerpc/configs/52xx/motionpro_defconfig
@@ -12,7 +12,6 @@ CONFIG_EXPERT=y
12CONFIG_PPC_MPC52xx=y 12CONFIG_PPC_MPC52xx=y
13CONFIG_PPC_MPC5200_SIMPLE=y 13CONFIG_PPC_MPC5200_SIMPLE=y
14# CONFIG_PPC_PMAC is not set 14# CONFIG_PPC_PMAC is not set
15CONFIG_PPC_BESTCOMM=y
16CONFIG_SPARSE_IRQ=y 15CONFIG_SPARSE_IRQ=y
17CONFIG_PM=y 16CONFIG_PM=y
18# CONFIG_PCI is not set 17# CONFIG_PCI is not set
@@ -84,6 +83,8 @@ CONFIG_LEDS_TRIGGERS=y
84CONFIG_LEDS_TRIGGER_TIMER=y 83CONFIG_LEDS_TRIGGER_TIMER=y
85CONFIG_RTC_CLASS=y 84CONFIG_RTC_CLASS=y
86CONFIG_RTC_DRV_DS1307=y 85CONFIG_RTC_DRV_DS1307=y
86CONFIG_DMADEVICES=y
87CONFIG_PPC_BESTCOMM=y
87CONFIG_EXT2_FS=y 88CONFIG_EXT2_FS=y
88CONFIG_EXT3_FS=y 89CONFIG_EXT3_FS=y
89# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 90# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig
index 22e719575c60..430aa182fa1c 100644
--- a/arch/powerpc/configs/52xx/pcm030_defconfig
+++ b/arch/powerpc/configs/52xx/pcm030_defconfig
@@ -21,7 +21,6 @@ CONFIG_MODULE_UNLOAD=y
21CONFIG_PPC_MPC52xx=y 21CONFIG_PPC_MPC52xx=y
22CONFIG_PPC_MPC5200_SIMPLE=y 22CONFIG_PPC_MPC5200_SIMPLE=y
23# CONFIG_PPC_PMAC is not set 23# CONFIG_PPC_PMAC is not set
24CONFIG_PPC_BESTCOMM=y
25CONFIG_NO_HZ=y 24CONFIG_NO_HZ=y
26CONFIG_HIGH_RES_TIMERS=y 25CONFIG_HIGH_RES_TIMERS=y
27CONFIG_HZ_100=y 26CONFIG_HZ_100=y
@@ -87,6 +86,8 @@ CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
87CONFIG_USB_STORAGE=m 86CONFIG_USB_STORAGE=m
88CONFIG_RTC_CLASS=y 87CONFIG_RTC_CLASS=y
89CONFIG_RTC_DRV_PCF8563=m 88CONFIG_RTC_DRV_PCF8563=m
89CONFIG_DMADEVICES=y
90CONFIG_PPC_BESTCOMM=y
90CONFIG_EXT2_FS=m 91CONFIG_EXT2_FS=m
91CONFIG_EXT3_FS=m 92CONFIG_EXT3_FS=m
92# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 93# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
index 716a37be16e3..7af4c5bb7c63 100644
--- a/arch/powerpc/configs/52xx/tqm5200_defconfig
+++ b/arch/powerpc/configs/52xx/tqm5200_defconfig
@@ -17,7 +17,6 @@ CONFIG_PPC_MPC52xx=y
17CONFIG_PPC_MPC5200_SIMPLE=y 17CONFIG_PPC_MPC5200_SIMPLE=y
18CONFIG_PPC_MPC5200_BUGFIX=y 18CONFIG_PPC_MPC5200_BUGFIX=y
19# CONFIG_PPC_PMAC is not set 19# CONFIG_PPC_PMAC is not set
20CONFIG_PPC_BESTCOMM=y
21CONFIG_PM=y 20CONFIG_PM=y
22# CONFIG_PCI is not set 21# CONFIG_PCI is not set
23CONFIG_NET=y 22CONFIG_NET=y
@@ -86,6 +85,8 @@ CONFIG_USB_STORAGE=y
86CONFIG_RTC_CLASS=y 85CONFIG_RTC_CLASS=y
87CONFIG_RTC_DRV_DS1307=y 86CONFIG_RTC_DRV_DS1307=y
88CONFIG_RTC_DRV_DS1374=y 87CONFIG_RTC_DRV_DS1374=y
88CONFIG_DMADEVICES=y
89CONFIG_PPC_BESTCOMM=y
89CONFIG_EXT2_FS=y 90CONFIG_EXT2_FS=y
90CONFIG_EXT3_FS=y 91CONFIG_EXT3_FS=y
91# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 92# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
index 6640a35bebb7..8b682d1cf4d6 100644
--- a/arch/powerpc/configs/mpc5200_defconfig
+++ b/arch/powerpc/configs/mpc5200_defconfig
@@ -15,7 +15,6 @@ CONFIG_PPC_MEDIA5200=y
15CONFIG_PPC_MPC5200_BUGFIX=y 15CONFIG_PPC_MPC5200_BUGFIX=y
16CONFIG_PPC_MPC5200_LPBFIFO=m 16CONFIG_PPC_MPC5200_LPBFIFO=m
17# CONFIG_PPC_PMAC is not set 17# CONFIG_PPC_PMAC is not set
18CONFIG_PPC_BESTCOMM=y
19CONFIG_SIMPLE_GPIO=y 18CONFIG_SIMPLE_GPIO=y
20CONFIG_NO_HZ=y 19CONFIG_NO_HZ=y
21CONFIG_HIGH_RES_TIMERS=y 20CONFIG_HIGH_RES_TIMERS=y
@@ -125,6 +124,8 @@ CONFIG_RTC_CLASS=y
125CONFIG_RTC_DRV_DS1307=y 124CONFIG_RTC_DRV_DS1307=y
126CONFIG_RTC_DRV_DS1374=y 125CONFIG_RTC_DRV_DS1374=y
127CONFIG_RTC_DRV_PCF8563=m 126CONFIG_RTC_DRV_PCF8563=m
127CONFIG_DMADEVICES=y
128CONFIG_PPC_BESTCOMM=y
128CONFIG_EXT2_FS=y 129CONFIG_EXT2_FS=y
129CONFIG_EXT3_FS=y 130CONFIG_EXT3_FS=y
130# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 131# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index bd8a6f71944f..cec044a3ff69 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -2,7 +2,6 @@ CONFIG_PPC64=y
2CONFIG_ALTIVEC=y 2CONFIG_ALTIVEC=y
3CONFIG_SMP=y 3CONFIG_SMP=y
4CONFIG_NR_CPUS=2 4CONFIG_NR_CPUS=2
5CONFIG_EXPERIMENTAL=y
6CONFIG_SYSVIPC=y 5CONFIG_SYSVIPC=y
7CONFIG_NO_HZ=y 6CONFIG_NO_HZ=y
8CONFIG_HIGH_RES_TIMERS=y 7CONFIG_HIGH_RES_TIMERS=y
@@ -45,8 +44,9 @@ CONFIG_INET_AH=y
45CONFIG_INET_ESP=y 44CONFIG_INET_ESP=y
46# CONFIG_IPV6 is not set 45# CONFIG_IPV6 is not set
47CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 46CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
47CONFIG_DEVTMPFS=y
48CONFIG_DEVTMPFS_MOUNT=y
48CONFIG_MTD=y 49CONFIG_MTD=y
49CONFIG_MTD_CHAR=y
50CONFIG_MTD_BLOCK=y 50CONFIG_MTD_BLOCK=y
51CONFIG_MTD_SLRAM=y 51CONFIG_MTD_SLRAM=y
52CONFIG_MTD_PHRAM=y 52CONFIG_MTD_PHRAM=y
@@ -88,7 +88,6 @@ CONFIG_BLK_DEV_DM=y
88CONFIG_DM_CRYPT=y 88CONFIG_DM_CRYPT=y
89CONFIG_NETDEVICES=y 89CONFIG_NETDEVICES=y
90CONFIG_DUMMY=y 90CONFIG_DUMMY=y
91CONFIG_MII=y
92CONFIG_TIGON3=y 91CONFIG_TIGON3=y
93CONFIG_E1000=y 92CONFIG_E1000=y
94CONFIG_PASEMI_MAC=y 93CONFIG_PASEMI_MAC=y
@@ -174,8 +173,8 @@ CONFIG_NLS_CODEPAGE_437=y
174CONFIG_NLS_ISO8859_1=y 173CONFIG_NLS_ISO8859_1=y
175CONFIG_CRC_CCITT=y 174CONFIG_CRC_CCITT=y
176CONFIG_PRINTK_TIME=y 175CONFIG_PRINTK_TIME=y
177CONFIG_MAGIC_SYSRQ=y
178CONFIG_DEBUG_FS=y 176CONFIG_DEBUG_FS=y
177CONFIG_MAGIC_SYSRQ=y
179CONFIG_DEBUG_KERNEL=y 178CONFIG_DEBUG_KERNEL=y
180CONFIG_DETECT_HUNG_TASK=y 179CONFIG_DETECT_HUNG_TASK=y
181# CONFIG_SCHED_DEBUG is not set 180# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/powerpc/configs/pseries_le_defconfig b/arch/powerpc/configs/pseries_le_defconfig
new file mode 100644
index 000000000000..62771e0adb7c
--- /dev/null
+++ b/arch/powerpc/configs/pseries_le_defconfig
@@ -0,0 +1,352 @@
1CONFIG_PPC64=y
2CONFIG_ALTIVEC=y
3CONFIG_VSX=y
4CONFIG_SMP=y
5CONFIG_NR_CPUS=2048
6CONFIG_CPU_LITTLE_ENDIAN=y
7CONFIG_SYSVIPC=y
8CONFIG_POSIX_MQUEUE=y
9CONFIG_AUDIT=y
10CONFIG_AUDITSYSCALL=y
11CONFIG_IRQ_DOMAIN_DEBUG=y
12CONFIG_NO_HZ=y
13CONFIG_HIGH_RES_TIMERS=y
14CONFIG_TASKSTATS=y
15CONFIG_TASK_DELAY_ACCT=y
16CONFIG_TASK_XACCT=y
17CONFIG_TASK_IO_ACCOUNTING=y
18CONFIG_IKCONFIG=y
19CONFIG_IKCONFIG_PROC=y
20CONFIG_CGROUPS=y
21CONFIG_CGROUP_FREEZER=y
22CONFIG_CGROUP_DEVICE=y
23CONFIG_CPUSETS=y
24CONFIG_CGROUP_CPUACCT=y
25CONFIG_BLK_DEV_INITRD=y
26# CONFIG_COMPAT_BRK is not set
27CONFIG_PROFILING=y
28CONFIG_OPROFILE=y
29CONFIG_KPROBES=y
30CONFIG_JUMP_LABEL=y
31CONFIG_MODULES=y
32CONFIG_MODULE_UNLOAD=y
33CONFIG_MODVERSIONS=y
34CONFIG_MODULE_SRCVERSION_ALL=y
35CONFIG_PARTITION_ADVANCED=y
36CONFIG_PPC_SPLPAR=y
37CONFIG_SCANLOG=m
38CONFIG_PPC_SMLPAR=y
39CONFIG_DTL=y
40# CONFIG_PPC_PMAC is not set
41CONFIG_RTAS_FLASH=m
42CONFIG_IBMEBUS=y
43CONFIG_HZ_100=y
44CONFIG_BINFMT_MISC=m
45CONFIG_PPC_TRANSACTIONAL_MEM=y
46CONFIG_KEXEC=y
47CONFIG_IRQ_ALL_CPUS=y
48CONFIG_MEMORY_HOTPLUG=y
49CONFIG_MEMORY_HOTREMOVE=y
50CONFIG_CMA=y
51CONFIG_PPC_64K_PAGES=y
52CONFIG_PPC_SUBPAGE_PROT=y
53CONFIG_SCHED_SMT=y
54CONFIG_HOTPLUG_PCI=y
55CONFIG_HOTPLUG_PCI_RPA=m
56CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
57CONFIG_PACKET=y
58CONFIG_UNIX=y
59CONFIG_XFRM_USER=m
60CONFIG_NET_KEY=m
61CONFIG_INET=y
62CONFIG_IP_MULTICAST=y
63CONFIG_NET_IPIP=y
64CONFIG_SYN_COOKIES=y
65CONFIG_INET_AH=m
66CONFIG_INET_ESP=m
67CONFIG_INET_IPCOMP=m
68# CONFIG_IPV6 is not set
69CONFIG_NETFILTER=y
70CONFIG_NF_CONNTRACK=m
71CONFIG_NF_CONNTRACK_EVENTS=y
72CONFIG_NF_CT_PROTO_UDPLITE=m
73CONFIG_NF_CONNTRACK_FTP=m
74CONFIG_NF_CONNTRACK_IRC=m
75CONFIG_NF_CONNTRACK_TFTP=m
76CONFIG_NF_CT_NETLINK=m
77CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
78CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
79CONFIG_NETFILTER_XT_TARGET_MARK=m
80CONFIG_NETFILTER_XT_TARGET_NFLOG=m
81CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
82CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
83CONFIG_NETFILTER_XT_MATCH_COMMENT=m
84CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
85CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
86CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
87CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
88CONFIG_NETFILTER_XT_MATCH_DCCP=m
89CONFIG_NETFILTER_XT_MATCH_DSCP=m
90CONFIG_NETFILTER_XT_MATCH_ESP=m
91CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
92CONFIG_NETFILTER_XT_MATCH_HELPER=m
93CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
94CONFIG_NETFILTER_XT_MATCH_LENGTH=m
95CONFIG_NETFILTER_XT_MATCH_LIMIT=m
96CONFIG_NETFILTER_XT_MATCH_MAC=m
97CONFIG_NETFILTER_XT_MATCH_MARK=m
98CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
99CONFIG_NETFILTER_XT_MATCH_OWNER=m
100CONFIG_NETFILTER_XT_MATCH_POLICY=m
101CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
102CONFIG_NETFILTER_XT_MATCH_QUOTA=m
103CONFIG_NETFILTER_XT_MATCH_RATEEST=m
104CONFIG_NETFILTER_XT_MATCH_REALM=m
105CONFIG_NETFILTER_XT_MATCH_RECENT=m
106CONFIG_NETFILTER_XT_MATCH_SCTP=m
107CONFIG_NETFILTER_XT_MATCH_STATE=m
108CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
109CONFIG_NETFILTER_XT_MATCH_STRING=m
110CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
111CONFIG_NETFILTER_XT_MATCH_TIME=m
112CONFIG_NETFILTER_XT_MATCH_U32=m
113CONFIG_NF_CONNTRACK_IPV4=m
114CONFIG_IP_NF_IPTABLES=m
115CONFIG_IP_NF_MATCH_AH=m
116CONFIG_IP_NF_MATCH_ECN=m
117CONFIG_IP_NF_MATCH_TTL=m
118CONFIG_IP_NF_FILTER=m
119CONFIG_IP_NF_TARGET_REJECT=m
120CONFIG_IP_NF_TARGET_ULOG=m
121CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
122CONFIG_DEVTMPFS=y
123CONFIG_DEVTMPFS_MOUNT=y
124CONFIG_PROC_DEVICETREE=y
125CONFIG_PARPORT=m
126CONFIG_PARPORT_PC=m
127CONFIG_BLK_DEV_FD=m
128CONFIG_BLK_DEV_LOOP=y
129CONFIG_BLK_DEV_NBD=m
130CONFIG_BLK_DEV_RAM=y
131CONFIG_BLK_DEV_RAM_SIZE=65536
132CONFIG_VIRTIO_BLK=m
133CONFIG_IDE=y
134CONFIG_BLK_DEV_IDECD=y
135CONFIG_BLK_DEV_GENERIC=y
136CONFIG_BLK_DEV_AMD74XX=y
137CONFIG_BLK_DEV_SD=y
138CONFIG_CHR_DEV_ST=y
139CONFIG_BLK_DEV_SR=y
140CONFIG_BLK_DEV_SR_VENDOR=y
141CONFIG_CHR_DEV_SG=y
142CONFIG_SCSI_MULTI_LUN=y
143CONFIG_SCSI_CONSTANTS=y
144CONFIG_SCSI_FC_ATTRS=y
145CONFIG_SCSI_CXGB3_ISCSI=m
146CONFIG_SCSI_CXGB4_ISCSI=m
147CONFIG_SCSI_BNX2_ISCSI=m
148CONFIG_BE2ISCSI=m
149CONFIG_SCSI_MPT2SAS=m
150CONFIG_SCSI_IBMVSCSI=y
151CONFIG_SCSI_IBMVFC=m
152CONFIG_SCSI_SYM53C8XX_2=y
153CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
154CONFIG_SCSI_IPR=y
155CONFIG_SCSI_QLA_FC=m
156CONFIG_SCSI_QLA_ISCSI=m
157CONFIG_SCSI_LPFC=m
158CONFIG_SCSI_VIRTIO=m
159CONFIG_SCSI_DH=m
160CONFIG_SCSI_DH_RDAC=m
161CONFIG_SCSI_DH_ALUA=m
162CONFIG_ATA=y
163# CONFIG_ATA_SFF is not set
164CONFIG_MD=y
165CONFIG_BLK_DEV_MD=y
166CONFIG_MD_LINEAR=y
167CONFIG_MD_RAID0=y
168CONFIG_MD_RAID1=y
169CONFIG_MD_RAID10=m
170CONFIG_MD_RAID456=m
171CONFIG_MD_MULTIPATH=m
172CONFIG_MD_FAULTY=m
173CONFIG_BLK_DEV_DM=y
174CONFIG_DM_CRYPT=m
175CONFIG_DM_SNAPSHOT=m
176CONFIG_DM_MIRROR=m
177CONFIG_DM_ZERO=m
178CONFIG_DM_MULTIPATH=m
179CONFIG_DM_MULTIPATH_QL=m
180CONFIG_DM_MULTIPATH_ST=m
181CONFIG_DM_UEVENT=y
182CONFIG_BONDING=m
183CONFIG_DUMMY=m
184CONFIG_NETCONSOLE=y
185CONFIG_NETPOLL_TRAP=y
186CONFIG_TUN=m
187CONFIG_VIRTIO_NET=m
188CONFIG_VORTEX=y
189CONFIG_ACENIC=m
190CONFIG_ACENIC_OMIT_TIGON_I=y
191CONFIG_PCNET32=y
192CONFIG_TIGON3=y
193CONFIG_CHELSIO_T1=m
194CONFIG_BE2NET=m
195CONFIG_S2IO=m
196CONFIG_IBMVETH=y
197CONFIG_EHEA=y
198CONFIG_E100=y
199CONFIG_E1000=y
200CONFIG_E1000E=y
201CONFIG_IXGB=m
202CONFIG_IXGBE=m
203CONFIG_MLX4_EN=m
204CONFIG_MYRI10GE=m
205CONFIG_QLGE=m
206CONFIG_NETXEN_NIC=m
207CONFIG_PPP=m
208CONFIG_PPP_BSDCOMP=m
209CONFIG_PPP_DEFLATE=m
210CONFIG_PPPOE=m
211CONFIG_PPP_ASYNC=m
212CONFIG_PPP_SYNC_TTY=m
213# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
214CONFIG_INPUT_EVDEV=m
215CONFIG_INPUT_MISC=y
216CONFIG_INPUT_PCSPKR=m
217# CONFIG_SERIO_SERPORT is not set
218CONFIG_SERIAL_8250=y
219CONFIG_SERIAL_8250_CONSOLE=y
220CONFIG_SERIAL_ICOM=m
221CONFIG_SERIAL_JSM=m
222CONFIG_HVC_CONSOLE=y
223CONFIG_HVC_RTAS=y
224CONFIG_HVCS=m
225CONFIG_VIRTIO_CONSOLE=m
226CONFIG_IBM_BSR=m
227CONFIG_GEN_RTC=y
228CONFIG_RAW_DRIVER=y
229CONFIG_MAX_RAW_DEVS=1024
230CONFIG_FB=y
231CONFIG_FIRMWARE_EDID=y
232CONFIG_FB_OF=y
233CONFIG_FB_MATROX=y
234CONFIG_FB_MATROX_MILLENIUM=y
235CONFIG_FB_MATROX_MYSTIQUE=y
236CONFIG_FB_MATROX_G=y
237CONFIG_FB_RADEON=y
238CONFIG_FB_IBM_GXT4500=y
239CONFIG_LCD_PLATFORM=m
240# CONFIG_VGA_CONSOLE is not set
241CONFIG_FRAMEBUFFER_CONSOLE=y
242CONFIG_LOGO=y
243CONFIG_HID_GYRATION=y
244CONFIG_HID_PANTHERLORD=y
245CONFIG_HID_PETALYNX=y
246CONFIG_HID_SAMSUNG=y
247CONFIG_HID_SUNPLUS=y
248CONFIG_USB_HIDDEV=y
249CONFIG_USB=y
250CONFIG_USB_MON=m
251CONFIG_USB_EHCI_HCD=y
252# CONFIG_USB_EHCI_HCD_PPC_OF is not set
253CONFIG_USB_OHCI_HCD=y
254CONFIG_USB_STORAGE=m
255CONFIG_INFINIBAND=m
256CONFIG_INFINIBAND_USER_MAD=m
257CONFIG_INFINIBAND_USER_ACCESS=m
258CONFIG_INFINIBAND_MTHCA=m
259CONFIG_INFINIBAND_EHCA=m
260CONFIG_INFINIBAND_CXGB3=m
261CONFIG_INFINIBAND_CXGB4=m
262CONFIG_MLX4_INFINIBAND=m
263CONFIG_INFINIBAND_IPOIB=m
264CONFIG_INFINIBAND_IPOIB_CM=y
265CONFIG_INFINIBAND_SRP=m
266CONFIG_INFINIBAND_ISER=m
267CONFIG_VIRTIO_PCI=m
268CONFIG_VIRTIO_BALLOON=m
269CONFIG_EXT2_FS=y
270CONFIG_EXT2_FS_XATTR=y
271CONFIG_EXT2_FS_POSIX_ACL=y
272CONFIG_EXT2_FS_SECURITY=y
273CONFIG_EXT2_FS_XIP=y
274CONFIG_EXT3_FS=y
275CONFIG_EXT3_FS_POSIX_ACL=y
276CONFIG_EXT3_FS_SECURITY=y
277CONFIG_EXT4_FS=y
278CONFIG_EXT4_FS_POSIX_ACL=y
279CONFIG_EXT4_FS_SECURITY=y
280CONFIG_REISERFS_FS=y
281CONFIG_REISERFS_FS_XATTR=y
282CONFIG_REISERFS_FS_POSIX_ACL=y
283CONFIG_REISERFS_FS_SECURITY=y
284CONFIG_JFS_FS=m
285CONFIG_JFS_POSIX_ACL=y
286CONFIG_JFS_SECURITY=y
287CONFIG_XFS_FS=m
288CONFIG_XFS_POSIX_ACL=y
289CONFIG_BTRFS_FS=m
290CONFIG_BTRFS_FS_POSIX_ACL=y
291CONFIG_NILFS2_FS=m
292CONFIG_AUTOFS4_FS=m
293CONFIG_FUSE_FS=m
294CONFIG_ISO9660_FS=y
295CONFIG_UDF_FS=m
296CONFIG_MSDOS_FS=y
297CONFIG_VFAT_FS=y
298CONFIG_PROC_KCORE=y
299CONFIG_TMPFS=y
300CONFIG_TMPFS_POSIX_ACL=y
301CONFIG_HUGETLBFS=y
302CONFIG_CRAMFS=m
303CONFIG_SQUASHFS=m
304CONFIG_SQUASHFS_XATTR=y
305CONFIG_SQUASHFS_LZO=y
306CONFIG_SQUASHFS_XZ=y
307CONFIG_PSTORE=y
308CONFIG_NFS_FS=y
309CONFIG_NFS_V3_ACL=y
310CONFIG_NFS_V4=y
311CONFIG_NFSD=m
312CONFIG_NFSD_V3_ACL=y
313CONFIG_NFSD_V4=y
314CONFIG_CIFS=m
315CONFIG_CIFS_XATTR=y
316CONFIG_CIFS_POSIX=y
317CONFIG_NLS_DEFAULT="utf8"
318CONFIG_NLS_CODEPAGE_437=y
319CONFIG_NLS_ASCII=y
320CONFIG_NLS_ISO8859_1=y
321CONFIG_NLS_UTF8=y
322CONFIG_CRC_T10DIF=y
323CONFIG_MAGIC_SYSRQ=y
324CONFIG_DEBUG_KERNEL=y
325CONFIG_DEBUG_STACK_USAGE=y
326CONFIG_DEBUG_STACKOVERFLOW=y
327CONFIG_LOCKUP_DETECTOR=y
328CONFIG_LATENCYTOP=y
329CONFIG_SCHED_TRACER=y
330CONFIG_BLK_DEV_IO_TRACE=y
331CONFIG_CODE_PATCHING_SELFTEST=y
332CONFIG_FTR_FIXUP_SELFTEST=y
333CONFIG_MSI_BITMAP_SELFTEST=y
334CONFIG_XMON=y
335CONFIG_CRYPTO_TEST=m
336CONFIG_CRYPTO_PCBC=m
337CONFIG_CRYPTO_HMAC=y
338CONFIG_CRYPTO_MICHAEL_MIC=m
339CONFIG_CRYPTO_TGR192=m
340CONFIG_CRYPTO_WP512=m
341CONFIG_CRYPTO_ANUBIS=m
342CONFIG_CRYPTO_BLOWFISH=m
343CONFIG_CRYPTO_CAST6=m
344CONFIG_CRYPTO_KHAZAD=m
345CONFIG_CRYPTO_SALSA20=m
346CONFIG_CRYPTO_SERPENT=m
347CONFIG_CRYPTO_TEA=m
348CONFIG_CRYPTO_TWOFISH=m
349CONFIG_CRYPTO_LZO=m
350# CONFIG_CRYPTO_ANSI_CPRNG is not set
351CONFIG_CRYPTO_DEV_NX=y
352CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index cc0655a702a7..935b5e7a1436 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -31,6 +31,8 @@
31extern unsigned long randomize_et_dyn(unsigned long base); 31extern unsigned long randomize_et_dyn(unsigned long base);
32#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000)) 32#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
33 33
34#define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
35
34/* 36/*
35 * Our registers are always unsigned longs, whether we're a 32 bit 37 * Our registers are always unsigned longs, whether we're a 32 bit
36 * process or 64 bit, on either a 64 bit or 32 bit kernel. 38 * process or 64 bit, on either a 64 bit or 32 bit kernel.
@@ -86,6 +88,8 @@ typedef elf_vrregset_t elf_fpxregset_t;
86#ifdef __powerpc64__ 88#ifdef __powerpc64__
87# define SET_PERSONALITY(ex) \ 89# define SET_PERSONALITY(ex) \
88do { \ 90do { \
91 if (((ex).e_flags & 0x3) == 2) \
92 set_thread_flag(TIF_ELF2ABI); \
89 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ 93 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
90 set_thread_flag(TIF_32BIT); \ 94 set_thread_flag(TIF_32BIT); \
91 else \ 95 else \
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 0c7f2bfcf134..d8b600b3f058 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -403,6 +403,8 @@ static inline unsigned long cmo_get_page_size(void)
403extern long pSeries_enable_reloc_on_exc(void); 403extern long pSeries_enable_reloc_on_exc(void);
404extern long pSeries_disable_reloc_on_exc(void); 404extern long pSeries_disable_reloc_on_exc(void);
405 405
406extern long pseries_big_endian_exceptions(void);
407
406#else 408#else
407 409
408#define pSeries_enable_reloc_on_exc() do {} while (0) 410#define pSeries_enable_reloc_on_exc() do {} while (0)
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index 27b2386f738a..842846c1b711 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -84,10 +84,8 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
84static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, 84static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
85 unsigned long address) 85 unsigned long address)
86{ 86{
87 struct page *page = page_address(table);
88
89 tlb_flush_pgtable(tlb, address); 87 tlb_flush_pgtable(tlb, address);
90 pgtable_page_dtor(page); 88 pgtable_page_dtor(table);
91 pgtable_free_tlb(tlb, page, 0); 89 pgtable_free_tlb(tlb, page_address(table), 0);
92} 90}
93#endif /* _ASM_POWERPC_PGALLOC_32_H */ 91#endif /* _ASM_POWERPC_PGALLOC_32_H */
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 16cb92d215d2..4b0be20fcbfd 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -16,6 +16,7 @@ struct vmemmap_backing {
16 unsigned long phys; 16 unsigned long phys;
17 unsigned long virt_addr; 17 unsigned long virt_addr;
18}; 18};
19extern struct vmemmap_backing *vmemmap_list;
19 20
20/* 21/*
21 * Functions that deal with pagetables that could be at any level of 22 * Functions that deal with pagetables that could be at any level of
@@ -147,11 +148,9 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
147static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, 148static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
148 unsigned long address) 149 unsigned long address)
149{ 150{
150 struct page *page = page_address(table);
151
152 tlb_flush_pgtable(tlb, address); 151 tlb_flush_pgtable(tlb, address);
153 pgtable_page_dtor(page); 152 pgtable_page_dtor(table);
154 pgtable_free_tlb(tlb, page, 0); 153 pgtable_free_tlb(tlb, page_address(table), 0);
155} 154}
156 155
157#else /* if CONFIG_PPC_64K_PAGES */ 156#else /* if CONFIG_PPC_64K_PAGES */
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index a63b045e707c..12c32c5f533d 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -287,6 +287,32 @@ static inline long disable_reloc_on_exceptions(void) {
287 return plpar_set_mode(0, 3, 0, 0); 287 return plpar_set_mode(0, 3, 0, 0);
288} 288}
289 289
290/*
291 * Take exceptions in big endian mode on this partition
292 *
293 * Note: this call has a partition wide scope and can take a while to complete.
294 * If it returns H_LONG_BUSY_* it should be retried periodically until it
295 * returns H_SUCCESS.
296 */
297static inline long enable_big_endian_exceptions(void)
298{
299 /* mflags = 0: big endian exceptions */
300 return plpar_set_mode(0, 4, 0, 0);
301}
302
303/*
304 * Take exceptions in little endian mode on this partition
305 *
306 * Note: this call has a partition wide scope and can take a while to complete.
307 * If it returns H_LONG_BUSY_* it should be retried periodically until it
308 * returns H_SUCCESS.
309 */
310static inline long enable_little_endian_exceptions(void)
311{
312 /* mflags = 1: little endian exceptions */
313 return plpar_set_mode(1, 4, 0, 0);
314}
315
290static inline long plapr_set_ciabr(unsigned long ciabr) 316static inline long plapr_set_ciabr(unsigned long ciabr)
291{ 317{
292 return plpar_set_mode(0, 1, ciabr, 0); 318 return plpar_set_mode(0, 1, ciabr, 0);
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 3c1acc31a092..f595b98079ee 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -366,6 +366,8 @@ BEGIN_FTR_SECTION_NESTED(96); \
366 cmpwi dest,0; \ 366 cmpwi dest,0; \
367 beq- 90b; \ 367 beq- 90b; \
368END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) 368END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
369#elif defined(CONFIG_8xx)
370#define MFTB(dest) mftb dest
369#else 371#else
370#define MFTB(dest) mfspr dest, SPRN_TBRL 372#define MFTB(dest) mfspr dest, SPRN_TBRL
371#endif 373#endif
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 5c45787d551e..fa8388ed94c5 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1174,12 +1174,19 @@
1174 1174
1175#else /* __powerpc64__ */ 1175#else /* __powerpc64__ */
1176 1176
1177#if defined(CONFIG_8xx)
1178#define mftbl() ({unsigned long rval; \
1179 asm volatile("mftbl %0" : "=r" (rval)); rval;})
1180#define mftbu() ({unsigned long rval; \
1181 asm volatile("mftbu %0" : "=r" (rval)); rval;})
1182#else
1177#define mftbl() ({unsigned long rval; \ 1183#define mftbl() ({unsigned long rval; \
1178 asm volatile("mfspr %0, %1" : "=r" (rval) : \ 1184 asm volatile("mfspr %0, %1" : "=r" (rval) : \
1179 "i" (SPRN_TBRL)); rval;}) 1185 "i" (SPRN_TBRL)); rval;})
1180#define mftbu() ({unsigned long rval; \ 1186#define mftbu() ({unsigned long rval; \
1181 asm volatile("mfspr %0, %1" : "=r" (rval) : \ 1187 asm volatile("mfspr %0, %1" : "=r" (rval) : \
1182 "i" (SPRN_TBRU)); rval;}) 1188 "i" (SPRN_TBRU)); rval;})
1189#endif
1183#endif /* !__powerpc64__ */ 1190#endif /* !__powerpc64__ */
1184 1191
1185#define mttbl(v) asm volatile("mttbl %0":: "r"(v)) 1192#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 98da78e0c2c0..084e0807db98 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -33,6 +33,7 @@ extern int boot_cpuid;
33extern int spinning_secondaries; 33extern int spinning_secondaries;
34 34
35extern void cpu_die(void); 35extern void cpu_die(void);
36extern int cpu_to_chip_id(int cpu);
36 37
37#ifdef CONFIG_SMP 38#ifdef CONFIG_SMP
38 39
@@ -112,7 +113,6 @@ static inline struct cpumask *cpu_core_mask(int cpu)
112} 113}
113 114
114extern int cpu_to_core_id(int cpu); 115extern int cpu_to_core_id(int cpu);
115extern int cpu_to_chip_id(int cpu);
116 116
117/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers. 117/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
118 * 118 *
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index ba7b1973866e..9854c564ac52 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -82,8 +82,6 @@ static inline struct thread_info *current_thread_info(void)
82 82
83#endif /* __ASSEMBLY__ */ 83#endif /* __ASSEMBLY__ */
84 84
85#define PREEMPT_ACTIVE 0x10000000
86
87/* 85/*
88 * thread information flag bit numbers 86 * thread information flag bit numbers
89 */ 87 */
@@ -107,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
107#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation 105#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
108 for stack store? */ 106 for stack store? */
109#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ 107#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
108#if defined(CONFIG_PPC64)
109#define TIF_ELF2ABI 18 /* function descriptors must die! */
110#endif
110 111
111/* as above, but as bit values */ 112/* as above, but as bit values */
112#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 113#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -185,6 +186,12 @@ static inline bool test_thread_local_flags(unsigned int flags)
185#define is_32bit_task() (1) 186#define is_32bit_task() (1)
186#endif 187#endif
187 188
189#if defined(CONFIG_PPC64)
190#define is_elf2_task() (test_thread_flag(TIF_ELF2ABI))
191#else
192#define is_elf2_task() (0)
193#endif
194
188#endif /* !__ASSEMBLY__ */ 195#endif /* !__ASSEMBLY__ */
189 196
190#endif /* __KERNEL__ */ 197#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
index 18908caa1f3b..2cf846edb3fc 100644
--- a/arch/powerpc/include/asm/timex.h
+++ b/arch/powerpc/include/asm/timex.h
@@ -29,7 +29,11 @@ static inline cycles_t get_cycles(void)
29 ret = 0; 29 ret = 0;
30 30
31 __asm__ __volatile__( 31 __asm__ __volatile__(
32#ifdef CONFIG_8xx
33 "97: mftb %0\n"
34#else
32 "97: mfspr %0, %2\n" 35 "97: mfspr %0, %2\n"
36#endif
33 "99:\n" 37 "99:\n"
34 ".section __ftr_fixup,\"a\"\n" 38 ".section __ftr_fixup,\"a\"\n"
35 ".align 2\n" 39 ".align 2\n"
@@ -41,7 +45,11 @@ static inline cycles_t get_cycles(void)
41 " .long 0\n" 45 " .long 0\n"
42 " .long 0\n" 46 " .long 0\n"
43 ".previous" 47 ".previous"
48#ifdef CONFIG_8xx
49 : "=r" (ret) : "i" (CPU_FTR_601));
50#else
44 : "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL)); 51 : "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL));
52#endif
45 return ret; 53 return ret;
46#endif 54#endif
47} 55}
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 671302065347..4bd687d5e7aa 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -686,6 +686,15 @@ void eeh_save_bars(struct eeh_dev *edev)
686 686
687 for (i = 0; i < 16; i++) 687 for (i = 0; i < 16; i++)
688 eeh_ops->read_config(dn, i * 4, 4, &edev->config_space[i]); 688 eeh_ops->read_config(dn, i * 4, 4, &edev->config_space[i]);
689
690 /*
691 * For PCI bridges including root port, we need enable bus
692 * master explicitly. Otherwise, it can't fetch IODA table
693 * entries correctly. So we cache the bit in advance so that
694 * we can restore it after reset, either PHB range or PE range.
695 */
696 if (edev->mode & EEH_DEV_BRIDGE)
697 edev->config_space[1] |= PCI_COMMAND_MASTER;
689} 698}
690 699
691/** 700/**
diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c
index d27c5afc90ae..72d748b56c86 100644
--- a/arch/powerpc/kernel/eeh_event.c
+++ b/arch/powerpc/kernel/eeh_event.c
@@ -74,8 +74,13 @@ static int eeh_event_handler(void * dummy)
74 pe = event->pe; 74 pe = event->pe;
75 if (pe) { 75 if (pe) {
76 eeh_pe_state_mark(pe, EEH_PE_RECOVERING); 76 eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
77 pr_info("EEH: Detected PCI bus error on PHB#%d-PE#%x\n", 77 if (pe->type & EEH_PE_PHB)
78 pe->phb->global_number, pe->addr); 78 pr_info("EEH: Detected error on PHB#%d\n",
79 pe->phb->global_number);
80 else
81 pr_info("EEH: Detected PCI bus error on "
82 "PHB#%d-PE#%x\n",
83 pe->phb->global_number, pe->addr);
79 eeh_handle_event(pe); 84 eeh_handle_event(pe);
80 eeh_pe_state_clear(pe, EEH_PE_RECOVERING); 85 eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
81 } else { 86 } else {
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index e1ec57e87b3b..75d4f7340da8 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -18,6 +18,7 @@
18#include <linux/ftrace.h> 18#include <linux/ftrace.h>
19 19
20#include <asm/machdep.h> 20#include <asm/machdep.h>
21#include <asm/pgalloc.h>
21#include <asm/prom.h> 22#include <asm/prom.h>
22#include <asm/sections.h> 23#include <asm/sections.h>
23 24
@@ -75,6 +76,17 @@ void arch_crash_save_vmcoreinfo(void)
75#ifndef CONFIG_NEED_MULTIPLE_NODES 76#ifndef CONFIG_NEED_MULTIPLE_NODES
76 VMCOREINFO_SYMBOL(contig_page_data); 77 VMCOREINFO_SYMBOL(contig_page_data);
77#endif 78#endif
79#if defined(CONFIG_PPC64) && defined(CONFIG_SPARSEMEM_VMEMMAP)
80 VMCOREINFO_SYMBOL(vmemmap_list);
81 VMCOREINFO_SYMBOL(mmu_vmemmap_psize);
82 VMCOREINFO_SYMBOL(mmu_psize_defs);
83 VMCOREINFO_STRUCT_SIZE(vmemmap_backing);
84 VMCOREINFO_OFFSET(vmemmap_backing, list);
85 VMCOREINFO_OFFSET(vmemmap_backing, phys);
86 VMCOREINFO_OFFSET(vmemmap_backing, virt_addr);
87 VMCOREINFO_STRUCT_SIZE(mmu_psize_def);
88 VMCOREINFO_OFFSET(mmu_psize_def, shift);
89#endif
78} 90}
79 91
80/* 92/*
@@ -136,7 +148,7 @@ void __init reserve_crashkernel(void)
136 * a small SLB (128MB) since the crash kernel needs to place 148 * a small SLB (128MB) since the crash kernel needs to place
137 * itself and some stacks to be in the first segment. 149 * itself and some stacks to be in the first segment.
138 */ 150 */
139 crashk_res.start = min(0x80000000ULL, (ppc64_rma_size / 2)); 151 crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2));
140#else 152#else
141 crashk_res.start = KDUMP_KERNELBASE; 153 crashk_res.start = KDUMP_KERNELBASE;
142#endif 154#endif
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index e59caf874d05..64bf8db12b15 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -246,8 +246,8 @@ _GLOBAL(__bswapdi2)
246 or r3,r7,r9 246 or r3,r7,r9
247 blr 247 blr
248 248
249#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
250 249
250#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
251_GLOBAL(rmci_on) 251_GLOBAL(rmci_on)
252 sync 252 sync
253 isync 253 isync
@@ -277,6 +277,9 @@ _GLOBAL(rmci_off)
277 isync 277 isync
278 sync 278 sync
279 blr 279 blr
280#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
281
282#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
280 283
281/* 284/*
282 * Do an IO access in real mode 285 * Do an IO access in real mode
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index fd82c289ab1c..28b898e68185 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -210,7 +210,7 @@ static void __init nvram_print_partitions(char * label)
210 printk(KERN_WARNING "--------%s---------\n", label); 210 printk(KERN_WARNING "--------%s---------\n", label);
211 printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n"); 211 printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n");
212 list_for_each_entry(tmp_part, &nvram_partitions, partition) { 212 list_for_each_entry(tmp_part, &nvram_partitions, partition) {
213 printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%12s\n", 213 printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%12.12s\n",
214 tmp_part->index, tmp_part->header.signature, 214 tmp_part->index, tmp_part->header.signature,
215 tmp_part->header.checksum, tmp_part->header.length, 215 tmp_part->header.checksum, tmp_part->header.length,
216 tmp_part->header.name); 216 tmp_part->header.name);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 75c2d1009985..3386d8ab7eb0 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -858,17 +858,21 @@ void show_regs(struct pt_regs * regs)
858 printk("MSR: "REG" ", regs->msr); 858 printk("MSR: "REG" ", regs->msr);
859 printbits(regs->msr, msr_bits); 859 printbits(regs->msr, msr_bits);
860 printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); 860 printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
861#ifdef CONFIG_PPC64
862 printk("SOFTE: %ld\n", regs->softe);
863#endif
864 trap = TRAP(regs); 861 trap = TRAP(regs);
865 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) 862 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
866 printk("CFAR: "REG"\n", regs->orig_gpr3); 863 printk("CFAR: "REG" ", regs->orig_gpr3);
867 if (trap == 0x300 || trap == 0x600) 864 if (trap == 0x200 || trap == 0x300 || trap == 0x600)
868#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) 865#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
869 printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); 866 printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
870#else 867#else
871 printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr); 868 printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
869#endif
870#ifdef CONFIG_PPC64
871 printk("SOFTE: %ld ", regs->softe);
872#endif
873#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
874 if (MSR_TM_ACTIVE(regs->msr))
875 printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
872#endif 876#endif
873 877
874 for (i = 0; i < 32; i++) { 878 for (i = 0; i < 32; i++) {
@@ -887,9 +891,6 @@ void show_regs(struct pt_regs * regs)
887 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); 891 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
888 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); 892 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
889#endif 893#endif
890#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
891 printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
892#endif
893 show_stack(current, (unsigned long *) regs->gpr[1]); 894 show_stack(current, (unsigned long *) regs->gpr[1]);
894 if (!user_mode(regs)) 895 if (!user_mode(regs))
895 show_instructions(regs); 896 show_instructions(regs);
@@ -1086,25 +1087,45 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1086 regs->msr = MSR_USER; 1087 regs->msr = MSR_USER;
1087#else 1088#else
1088 if (!is_32bit_task()) { 1089 if (!is_32bit_task()) {
1089 unsigned long entry, toc; 1090 unsigned long entry;
1090 1091
1091 /* start is a relocated pointer to the function descriptor for 1092 if (is_elf2_task()) {
1092 * the elf _start routine. The first entry in the function 1093 /* Look ma, no function descriptors! */
1093 * descriptor is the entry address of _start and the second 1094 entry = start;
1094 * entry is the TOC value we need to use.
1095 */
1096 __get_user(entry, (unsigned long __user *)start);
1097 __get_user(toc, (unsigned long __user *)start+1);
1098 1095
1099 /* Check whether the e_entry function descriptor entries 1096 /*
1100 * need to be relocated before we can use them. 1097 * Ulrich says:
1101 */ 1098 * The latest iteration of the ABI requires that when
1102 if (load_addr != 0) { 1099 * calling a function (at its global entry point),
1103 entry += load_addr; 1100 * the caller must ensure r12 holds the entry point
1104 toc += load_addr; 1101 * address (so that the function can quickly
1102 * establish addressability).
1103 */
1104 regs->gpr[12] = start;
1105 /* Make sure that's restored on entry to userspace. */
1106 set_thread_flag(TIF_RESTOREALL);
1107 } else {
1108 unsigned long toc;
1109
1110 /* start is a relocated pointer to the function
1111 * descriptor for the elf _start routine. The first
1112 * entry in the function descriptor is the entry
1113 * address of _start and the second entry is the TOC
1114 * value we need to use.
1115 */
1116 __get_user(entry, (unsigned long __user *)start);
1117 __get_user(toc, (unsigned long __user *)start+1);
1118
1119 /* Check whether the e_entry function descriptor entries
1120 * need to be relocated before we can use them.
1121 */
1122 if (load_addr != 0) {
1123 entry += load_addr;
1124 toc += load_addr;
1125 }
1126 regs->gpr[2] = toc;
1105 } 1127 }
1106 regs->nip = entry; 1128 regs->nip = entry;
1107 regs->gpr[2] = toc;
1108 regs->msr = MSR_USER64; 1129 regs->msr = MSR_USER64;
1109 } else { 1130 } else {
1110 regs->nip = start; 1131 regs->nip = start;
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index f3a47098fb8e..fa0ad8aafbcc 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -777,6 +777,26 @@ int of_get_ibm_chip_id(struct device_node *np)
777 return -1; 777 return -1;
778} 778}
779 779
780/**
781 * cpu_to_chip_id - Return the cpus chip-id
782 * @cpu: The logical cpu number.
783 *
784 * Return the value of the ibm,chip-id property corresponding to the given
785 * logical cpu number. If the chip-id can not be found, returns -1.
786 */
787int cpu_to_chip_id(int cpu)
788{
789 struct device_node *np;
790
791 np = of_get_cpu_node(cpu, NULL);
792 if (!np)
793 return -1;
794
795 of_node_put(np);
796 return of_get_ibm_chip_id(np);
797}
798EXPORT_SYMBOL(cpu_to_chip_id);
799
780#ifdef CONFIG_PPC_PSERIES 800#ifdef CONFIG_PPC_PSERIES
781/* 801/*
782 * Fix up the uninitialized fields in a new device node: 802 * Fix up the uninitialized fields in a new device node:
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 749778e0a69d..68027bfa5f8e 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -445,6 +445,12 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
445#endif /* CONFIG_ALTIVEC */ 445#endif /* CONFIG_ALTIVEC */
446 if (copy_fpr_to_user(&frame->mc_fregs, current)) 446 if (copy_fpr_to_user(&frame->mc_fregs, current))
447 return 1; 447 return 1;
448
449 /*
450 * Clear the MSR VSX bit to indicate there is no valid state attached
451 * to this context, except in the specific case below where we set it.
452 */
453 msr &= ~MSR_VSX;
448#ifdef CONFIG_VSX 454#ifdef CONFIG_VSX
449 /* 455 /*
450 * Copy VSR 0-31 upper half from thread_struct to local 456 * Copy VSR 0-31 upper half from thread_struct to local
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index b3c615764c9b..42991045349f 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -122,6 +122,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
122 flush_fp_to_thread(current); 122 flush_fp_to_thread(current);
123 /* copy fpr regs and fpscr */ 123 /* copy fpr regs and fpscr */
124 err |= copy_fpr_to_user(&sc->fp_regs, current); 124 err |= copy_fpr_to_user(&sc->fp_regs, current);
125
126 /*
127 * Clear the MSR VSX bit to indicate there is no valid state attached
128 * to this context, except in the specific case below where we set it.
129 */
130 msr &= ~MSR_VSX;
125#ifdef CONFIG_VSX 131#ifdef CONFIG_VSX
126 /* 132 /*
127 * Copy VSX low doubleword to local buffer for formatting, 133 * Copy VSX low doubleword to local buffer for formatting,
@@ -701,12 +707,6 @@ badframe:
701int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, 707int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
702 sigset_t *set, struct pt_regs *regs) 708 sigset_t *set, struct pt_regs *regs)
703{ 709{
704 /* Handler is *really* a pointer to the function descriptor for
705 * the signal routine. The first entry in the function
706 * descriptor is the entry address of signal and the second
707 * entry is the TOC value we need to use.
708 */
709 func_descr_t __user *funct_desc_ptr;
710 struct rt_sigframe __user *frame; 710 struct rt_sigframe __user *frame;
711 unsigned long newsp = 0; 711 unsigned long newsp = 0;
712 long err = 0; 712 long err = 0;
@@ -766,19 +766,32 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
766 goto badframe; 766 goto badframe;
767 regs->link = (unsigned long) &frame->tramp[0]; 767 regs->link = (unsigned long) &frame->tramp[0];
768 } 768 }
769 funct_desc_ptr = (func_descr_t __user *) ka->sa.sa_handler;
770 769
771 /* Allocate a dummy caller frame for the signal handler. */ 770 /* Allocate a dummy caller frame for the signal handler. */
772 newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE; 771 newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
773 err |= put_user(regs->gpr[1], (unsigned long __user *)newsp); 772 err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);
774 773
775 /* Set up "regs" so we "return" to the signal handler. */ 774 /* Set up "regs" so we "return" to the signal handler. */
776 err |= get_user(regs->nip, &funct_desc_ptr->entry); 775 if (is_elf2_task()) {
776 regs->nip = (unsigned long) ka->sa.sa_handler;
777 regs->gpr[12] = regs->nip;
778 } else {
779 /* Handler is *really* a pointer to the function descriptor for
780 * the signal routine. The first entry in the function
781 * descriptor is the entry address of signal and the second
782 * entry is the TOC value we need to use.
783 */
784 func_descr_t __user *funct_desc_ptr =
785 (func_descr_t __user *) ka->sa.sa_handler;
786
787 err |= get_user(regs->nip, &funct_desc_ptr->entry);
788 err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
789 }
790
777 /* enter the signal handler in native-endian mode */ 791 /* enter the signal handler in native-endian mode */
778 regs->msr &= ~MSR_LE; 792 regs->msr &= ~MSR_LE;
779 regs->msr |= (MSR_KERNEL & MSR_LE); 793 regs->msr |= (MSR_KERNEL & MSR_LE);
780 regs->gpr[1] = newsp; 794 regs->gpr[1] = newsp;
781 err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
782 regs->gpr[3] = signr; 795 regs->gpr[3] = signr;
783 regs->result = 0; 796 regs->result = 0;
784 if (ka->sa.sa_flags & SA_SIGINFO) { 797 if (ka->sa.sa_flags & SA_SIGINFO) {
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 930cd8af3503..a3b64f3bf9a2 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -597,22 +597,6 @@ out:
597 return id; 597 return id;
598} 598}
599 599
600/* Return the value of the chip-id property corresponding
601 * to the given logical cpu.
602 */
603int cpu_to_chip_id(int cpu)
604{
605 struct device_node *np;
606
607 np = of_get_cpu_node(cpu, NULL);
608 if (!np)
609 return -1;
610
611 of_node_put(np);
612 return of_get_ibm_chip_id(np);
613}
614EXPORT_SYMBOL(cpu_to_chip_id);
615
616/* Helper routines for cpu to core mapping */ 600/* Helper routines for cpu to core mapping */
617int cpu_core_index_of_thread(int cpu) 601int cpu_core_index_of_thread(int cpu)
618{ 602{
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 192b051df97e..b3b144121cc9 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -213,8 +213,6 @@ static u64 scan_dispatch_log(u64 stop_tb)
213 if (i == be64_to_cpu(vpa->dtl_idx)) 213 if (i == be64_to_cpu(vpa->dtl_idx))
214 return 0; 214 return 0;
215 while (i < be64_to_cpu(vpa->dtl_idx)) { 215 while (i < be64_to_cpu(vpa->dtl_idx)) {
216 if (dtl_consumer)
217 dtl_consumer(dtl, i);
218 dtb = be64_to_cpu(dtl->timebase); 216 dtb = be64_to_cpu(dtl->timebase);
219 tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) + 217 tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
220 be32_to_cpu(dtl->ready_to_enqueue_time); 218 be32_to_cpu(dtl->ready_to_enqueue_time);
@@ -227,6 +225,8 @@ static u64 scan_dispatch_log(u64 stop_tb)
227 } 225 }
228 if (dtb > stop_tb) 226 if (dtb > stop_tb)
229 break; 227 break;
228 if (dtl_consumer)
229 dtl_consumer(dtl, i);
230 stolen += tb_delta; 230 stolen += tb_delta;
231 ++i; 231 ++i;
232 ++dtl; 232 ++dtl;
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index 6b1f2a6d5517..6b2b69616e77 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -232,9 +232,15 @@ __do_get_tspec:
232 lwz r6,(CFG_TB_ORIG_STAMP+4)(r9) 232 lwz r6,(CFG_TB_ORIG_STAMP+4)(r9)
233 233
234 /* Get a stable TB value */ 234 /* Get a stable TB value */
235#ifdef CONFIG_8xx
2362: mftbu r3
237 mftbl r4
238 mftbu r0
239#else
2352: mfspr r3, SPRN_TBRU 2402: mfspr r3, SPRN_TBRU
236 mfspr r4, SPRN_TBRL 241 mfspr r4, SPRN_TBRL
237 mfspr r0, SPRN_TBRU 242 mfspr r0, SPRN_TBRU
243#endif
238 cmplw cr0,r3,r0 244 cmplw cr0,r3,r0
239 bne- 2b 245 bne- 2b
240 246
diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S
index 45ea281e9a21..542c6f422e4d 100644
--- a/arch/powerpc/kernel/vdso64/sigtramp.S
+++ b/arch/powerpc/kernel/vdso64/sigtramp.S
@@ -142,6 +142,13 @@ V_FUNCTION_END(__kernel_sigtramp_rt64)
142/* Size of CR reg in DWARF unwind info. */ 142/* Size of CR reg in DWARF unwind info. */
143#define CRSIZE 4 143#define CRSIZE 4
144 144
145/* Offset of CR reg within a full word. */
146#ifdef __LITTLE_ENDIAN__
147#define CROFF 0
148#else
149#define CROFF (RSIZE - CRSIZE)
150#endif
151
145/* This is the offset of the VMX reg pointer. */ 152/* This is the offset of the VMX reg pointer. */
146#define VREGS 48*RSIZE+33*8 153#define VREGS 48*RSIZE+33*8
147 154
@@ -181,7 +188,14 @@ V_FUNCTION_END(__kernel_sigtramp_rt64)
181 rsave (31, 31*RSIZE); \ 188 rsave (31, 31*RSIZE); \
182 rsave (67, 32*RSIZE); /* ap, used as temp for nip */ \ 189 rsave (67, 32*RSIZE); /* ap, used as temp for nip */ \
183 rsave (65, 36*RSIZE); /* lr */ \ 190 rsave (65, 36*RSIZE); /* lr */ \
184 rsave (70, 38*RSIZE + (RSIZE - CRSIZE)) /* cr */ 191 rsave (68, 38*RSIZE + CROFF); /* cr fields */ \
192 rsave (69, 38*RSIZE + CROFF); \
193 rsave (70, 38*RSIZE + CROFF); \
194 rsave (71, 38*RSIZE + CROFF); \
195 rsave (72, 38*RSIZE + CROFF); \
196 rsave (73, 38*RSIZE + CROFF); \
197 rsave (74, 38*RSIZE + CROFF); \
198 rsave (75, 38*RSIZE + CROFF)
185 199
186/* Describe where the FP regs are saved. */ 200/* Describe where the FP regs are saved. */
187#define EH_FRAME_FP \ 201#define EH_FRAME_FP \
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index e7d0c88f621a..76a64821f4a2 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1419,7 +1419,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
1419 1419
1420 /* needed to ensure proper operation of coherent allocations 1420 /* needed to ensure proper operation of coherent allocations
1421 * later, in case driver doesn't set it explicitly */ 1421 * later, in case driver doesn't set it explicitly */
1422 dma_set_mask_and_coherent(&viodev->dev, DMA_BIT_MASK(64)); 1422 dma_coerce_mask_and_coherent(&viodev->dev, DMA_BIT_MASK(64));
1423 } 1423 }
1424 1424
1425 /* register with generic device framework */ 1425 /* register with generic device framework */
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index 6936547018b8..c5f734e20b0f 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -123,6 +123,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
123 struct mm_struct *mm = current->mm; 123 struct mm_struct *mm = current->mm;
124 unsigned long addr, len, end; 124 unsigned long addr, len, end;
125 unsigned long next; 125 unsigned long next;
126 unsigned long flags;
126 pgd_t *pgdp; 127 pgd_t *pgdp;
127 int nr = 0; 128 int nr = 0;
128 129
@@ -156,7 +157,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
156 * So long as we atomically load page table pointers versus teardown, 157 * So long as we atomically load page table pointers versus teardown,
157 * we can follow the address down to the the page and take a ref on it. 158 * we can follow the address down to the the page and take a ref on it.
158 */ 159 */
159 local_irq_disable(); 160 local_irq_save(flags);
160 161
161 pgdp = pgd_offset(mm, addr); 162 pgdp = pgd_offset(mm, addr);
162 do { 163 do {
@@ -179,7 +180,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
179 break; 180 break;
180 } while (pgdp++, addr = next, addr != end); 181 } while (pgdp++, addr = next, addr != end);
181 182
182 local_irq_enable(); 183 local_irq_restore(flags);
183 184
184 return nr; 185 return nr;
185} 186}
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c
index 3bc700655fc8..74551b5e41e5 100644
--- a/arch/powerpc/mm/hugetlbpage-book3e.c
+++ b/arch/powerpc/mm/hugetlbpage-book3e.c
@@ -117,6 +117,5 @@ void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
117 struct hstate *hstate = hstate_file(vma->vm_file); 117 struct hstate *hstate = hstate_file(vma->vm_file);
118 unsigned long tsize = huge_page_shift(hstate) - 10; 118 unsigned long tsize = huge_page_shift(hstate) - 10;
119 119
120 __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, tsize, 0); 120 __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
121
122} 121}
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 3e99c149271a..7ce9cf3b6988 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -258,7 +258,7 @@ static bool slice_scan_available(unsigned long addr,
258 slice = GET_HIGH_SLICE_INDEX(addr); 258 slice = GET_HIGH_SLICE_INDEX(addr);
259 *boundary_addr = (slice + end) ? 259 *boundary_addr = (slice + end) ?
260 ((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP; 260 ((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
261 return !!(available.high_slices & (1u << slice)); 261 return !!(available.high_slices & (1ul << slice));
262 } 262 }
263} 263}
264 264
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 41cd68dee681..358d74303138 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -305,7 +305,7 @@ void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
305void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) 305void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
306{ 306{
307#ifdef CONFIG_HUGETLB_PAGE 307#ifdef CONFIG_HUGETLB_PAGE
308 if (is_vm_hugetlb_page(vma)) 308 if (vma && is_vm_hugetlb_page(vma))
309 flush_hugetlb_page(vma, vmaddr); 309 flush_hugetlb_page(vma, vmaddr);
310#endif 310#endif
311 311
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index c2a566fb8bb8..bca2465a9c34 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -403,3 +403,28 @@ config PPC_DOORBELL
403 default n 403 default n
404 404
405endmenu 405endmenu
406
407choice
408 prompt "Endianness selection"
409 default CPU_BIG_ENDIAN
410 help
411 This option selects whether a big endian or little endian kernel will
412 be built.
413
414config CPU_BIG_ENDIAN
415 bool "Build big endian kernel"
416 help
417 Build a big endian kernel.
418
419 If unsure, select this option.
420
421config CPU_LITTLE_ENDIAN
422 bool "Build little endian kernel"
423 help
424 Build a little endian kernel.
425
426 Note that if cross compiling a little endian kernel,
427 CROSS_COMPILE must point to a toolchain capable of targeting
428 little endian powerpc.
429
430endchoice
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 084cdfa40682..2c6d173842b2 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -720,6 +720,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
720 tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE; 720 tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
721 } 721 }
722 iommu_init_table(tbl, phb->hose->node); 722 iommu_init_table(tbl, phb->hose->node);
723 iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number);
723 724
724 if (pe->pdev) 725 if (pe->pdev)
725 set_iommu_table_base(&pe->pdev->dev, tbl); 726 set_iommu_table_base(&pe->pdev->dev, tbl);
diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c
index 8844628915dc..1cb160dc1609 100644
--- a/arch/powerpc/platforms/powernv/rng.c
+++ b/arch/powerpc/platforms/powernv/rng.c
@@ -19,6 +19,7 @@
19#include <asm/io.h> 19#include <asm/io.h>
20#include <asm/prom.h> 20#include <asm/prom.h>
21#include <asm/machdep.h> 21#include <asm/machdep.h>
22#include <asm/smp.h>
22 23
23 24
24struct powernv_rng { 25struct powernv_rng {
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 7fbc25b1813f..ccb633e077b1 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -189,8 +189,9 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
189 struct eeh_dev *edev; 189 struct eeh_dev *edev;
190 struct eeh_pe pe; 190 struct eeh_pe pe;
191 struct pci_dn *pdn = PCI_DN(dn); 191 struct pci_dn *pdn = PCI_DN(dn);
192 const u32 *class_code, *vendor_id, *device_id; 192 const __be32 *classp, *vendorp, *devicep;
193 const u32 *regs; 193 u32 class_code;
194 const __be32 *regs;
194 u32 pcie_flags; 195 u32 pcie_flags;
195 int enable = 0; 196 int enable = 0;
196 int ret; 197 int ret;
@@ -201,22 +202,24 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
201 return NULL; 202 return NULL;
202 203
203 /* Retrieve class/vendor/device IDs */ 204 /* Retrieve class/vendor/device IDs */
204 class_code = of_get_property(dn, "class-code", NULL); 205 classp = of_get_property(dn, "class-code", NULL);
205 vendor_id = of_get_property(dn, "vendor-id", NULL); 206 vendorp = of_get_property(dn, "vendor-id", NULL);
206 device_id = of_get_property(dn, "device-id", NULL); 207 devicep = of_get_property(dn, "device-id", NULL);
207 208
208 /* Skip for bad OF node or PCI-ISA bridge */ 209 /* Skip for bad OF node or PCI-ISA bridge */
209 if (!class_code || !vendor_id || !device_id) 210 if (!classp || !vendorp || !devicep)
210 return NULL; 211 return NULL;
211 if (dn->type && !strcmp(dn->type, "isa")) 212 if (dn->type && !strcmp(dn->type, "isa"))
212 return NULL; 213 return NULL;
213 214
215 class_code = of_read_number(classp, 1);
216
214 /* 217 /*
215 * Update class code and mode of eeh device. We need 218 * Update class code and mode of eeh device. We need
216 * correctly reflects that current device is root port 219 * correctly reflects that current device is root port
217 * or PCIe switch downstream port. 220 * or PCIe switch downstream port.
218 */ 221 */
219 edev->class_code = *class_code; 222 edev->class_code = class_code;
220 edev->pcie_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_EXP); 223 edev->pcie_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_EXP);
221 edev->mode &= 0xFFFFFF00; 224 edev->mode &= 0xFFFFFF00;
222 if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) { 225 if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
@@ -243,12 +246,12 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
243 /* Initialize the fake PE */ 246 /* Initialize the fake PE */
244 memset(&pe, 0, sizeof(struct eeh_pe)); 247 memset(&pe, 0, sizeof(struct eeh_pe));
245 pe.phb = edev->phb; 248 pe.phb = edev->phb;
246 pe.config_addr = regs[0]; 249 pe.config_addr = of_read_number(regs, 1);
247 250
248 /* Enable EEH on the device */ 251 /* Enable EEH on the device */
249 ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE); 252 ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
250 if (!ret) { 253 if (!ret) {
251 edev->config_addr = regs[0]; 254 edev->config_addr = of_read_number(regs, 1);
252 /* Retrieve PE address */ 255 /* Retrieve PE address */
253 edev->pe_config_addr = eeh_ops->get_pe_addr(&pe); 256 edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
254 pe.addr = edev->pe_config_addr; 257 pe.addr = edev->pe_config_addr;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 356bc75ca74f..4fca3def9db9 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -245,6 +245,23 @@ static void pSeries_lpar_hptab_clear(void)
245 &(ptes[j].pteh), &(ptes[j].ptel)); 245 &(ptes[j].pteh), &(ptes[j].ptel));
246 } 246 }
247 } 247 }
248
249#ifdef __LITTLE_ENDIAN__
250 /* Reset exceptions to big endian */
251 if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
252 long rc;
253
254 rc = pseries_big_endian_exceptions();
255 /*
256 * At this point it is unlikely panic() will get anything
257 * out to the user, but at least this will stop us from
258 * continuing on further and creating an even more
259 * difficult to debug situation.
260 */
261 if (rc)
262 panic("Could not enable big endian exceptions");
263 }
264#endif
248} 265}
249 266
250/* 267/*
diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
index a702f1c08242..72a102758d4e 100644
--- a/arch/powerpc/platforms/pseries/rng.c
+++ b/arch/powerpc/platforms/pseries/rng.c
@@ -13,6 +13,7 @@
13#include <linux/of.h> 13#include <linux/of.h>
14#include <asm/archrandom.h> 14#include <asm/archrandom.h>
15#include <asm/machdep.h> 15#include <asm/machdep.h>
16#include <asm/plpar_wrappers.h>
16 17
17 18
18static int pseries_get_random_long(unsigned long *v) 19static int pseries_get_random_long(unsigned long *v)
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 1f97e2b87a62..c1f190858701 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -442,6 +442,32 @@ static void pSeries_machine_kexec(struct kimage *image)
442} 442}
443#endif 443#endif
444 444
445#ifdef __LITTLE_ENDIAN__
446long pseries_big_endian_exceptions(void)
447{
448 long rc;
449
450 while (1) {
451 rc = enable_big_endian_exceptions();
452 if (!H_IS_LONG_BUSY(rc))
453 return rc;
454 mdelay(get_longbusy_msecs(rc));
455 }
456}
457
458static long pseries_little_endian_exceptions(void)
459{
460 long rc;
461
462 while (1) {
463 rc = enable_little_endian_exceptions();
464 if (!H_IS_LONG_BUSY(rc))
465 return rc;
466 mdelay(get_longbusy_msecs(rc));
467 }
468}
469#endif
470
445static void __init pSeries_setup_arch(void) 471static void __init pSeries_setup_arch(void)
446{ 472{
447 panic_timeout = 10; 473 panic_timeout = 10;
@@ -698,6 +724,22 @@ static int __init pSeries_probe(void)
698 /* Now try to figure out if we are running on LPAR */ 724 /* Now try to figure out if we are running on LPAR */
699 of_scan_flat_dt(pseries_probe_fw_features, NULL); 725 of_scan_flat_dt(pseries_probe_fw_features, NULL);
700 726
727#ifdef __LITTLE_ENDIAN__
728 if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
729 long rc;
730 /*
731 * Tell the hypervisor that we want our exceptions to
732 * be taken in little endian mode. If this fails we don't
733 * want to use BUG() because it will trigger an exception.
734 */
735 rc = pseries_little_endian_exceptions();
736 if (rc) {
737 ppc_md.progress("H_SET_MODE LE exception fail", 0);
738 panic("Could not enable little endian exceptions");
739 }
740 }
741#endif
742
701 if (firmware_has_feature(FW_FEATURE_LPAR)) 743 if (firmware_has_feature(FW_FEATURE_LPAR))
702 hpte_init_lpar(); 744 hpte_init_lpar();
703 else 745 else
diff --git a/arch/powerpc/platforms/wsp/chroma.c b/arch/powerpc/platforms/wsp/chroma.c
index 8ef53bc2e70e..aaa46b353715 100644
--- a/arch/powerpc/platforms/wsp/chroma.c
+++ b/arch/powerpc/platforms/wsp/chroma.c
@@ -15,6 +15,7 @@
15#include <linux/of.h> 15#include <linux/of.h>
16#include <linux/smp.h> 16#include <linux/smp.h>
17#include <linux/time.h> 17#include <linux/time.h>
18#include <linux/of_fdt.h>
18 19
19#include <asm/machdep.h> 20#include <asm/machdep.h>
20#include <asm/udbg.h> 21#include <asm/udbg.h>
diff --git a/arch/powerpc/platforms/wsp/h8.c b/arch/powerpc/platforms/wsp/h8.c
index d18e6cc19df3..a3c87f395750 100644
--- a/arch/powerpc/platforms/wsp/h8.c
+++ b/arch/powerpc/platforms/wsp/h8.c
@@ -10,6 +10,7 @@
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/of.h> 11#include <linux/of.h>
12#include <linux/io.h> 12#include <linux/io.h>
13#include <linux/of_address.h>
13 14
14#include "wsp.h" 15#include "wsp.h"
15 16
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c
index 2d3b1dd9571d..9cd92e645028 100644
--- a/arch/powerpc/platforms/wsp/ics.c
+++ b/arch/powerpc/platforms/wsp/ics.c
@@ -18,6 +18,8 @@
18#include <linux/smp.h> 18#include <linux/smp.h>
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/of_address.h>
22#include <linux/of_irq.h>
21 23
22#include <asm/io.h> 24#include <asm/io.h>
23#include <asm/irq.h> 25#include <asm/irq.h>
diff --git a/arch/powerpc/platforms/wsp/opb_pic.c b/arch/powerpc/platforms/wsp/opb_pic.c
index cb565bf93650..3f6729807938 100644
--- a/arch/powerpc/platforms/wsp/opb_pic.c
+++ b/arch/powerpc/platforms/wsp/opb_pic.c
@@ -15,6 +15,8 @@
15#include <linux/of.h> 15#include <linux/of.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/time.h> 17#include <linux/time.h>
18#include <linux/of_address.h>
19#include <linux/of_irq.h>
18 20
19#include <asm/reg_a2.h> 21#include <asm/reg_a2.h>
20#include <asm/irq.h> 22#include <asm/irq.h>
diff --git a/arch/powerpc/platforms/wsp/psr2.c b/arch/powerpc/platforms/wsp/psr2.c
index 508ec8282b96..a87b414c766a 100644
--- a/arch/powerpc/platforms/wsp/psr2.c
+++ b/arch/powerpc/platforms/wsp/psr2.c
@@ -15,6 +15,7 @@
15#include <linux/of.h> 15#include <linux/of.h>
16#include <linux/smp.h> 16#include <linux/smp.h>
17#include <linux/time.h> 17#include <linux/time.h>
18#include <linux/of_fdt.h>
18 19
19#include <asm/machdep.h> 20#include <asm/machdep.h>
20#include <asm/udbg.h> 21#include <asm/udbg.h>
diff --git a/arch/powerpc/platforms/wsp/scom_wsp.c b/arch/powerpc/platforms/wsp/scom_wsp.c
index 8928507affea..6538b4de34fc 100644
--- a/arch/powerpc/platforms/wsp/scom_wsp.c
+++ b/arch/powerpc/platforms/wsp/scom_wsp.c
@@ -14,6 +14,7 @@
14#include <linux/of.h> 14#include <linux/of.h>
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/of_address.h>
17 18
18#include <asm/cputhreads.h> 19#include <asm/cputhreads.h>
19#include <asm/reg_a2.h> 20#include <asm/reg_a2.h>
diff --git a/arch/powerpc/platforms/wsp/wsp.c b/arch/powerpc/platforms/wsp/wsp.c
index ddb6efe88914..58cd1f00e1ef 100644
--- a/arch/powerpc/platforms/wsp/wsp.c
+++ b/arch/powerpc/platforms/wsp/wsp.c
@@ -13,6 +13,7 @@
13#include <linux/smp.h> 13#include <linux/smp.h>
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include <linux/time.h> 15#include <linux/time.h>
16#include <linux/of_address.h>
16 17
17#include <asm/scom.h> 18#include <asm/scom.h>
18 19
diff --git a/arch/powerpc/sysdev/ppc4xx_ocm.c b/arch/powerpc/sysdev/ppc4xx_ocm.c
index b7c43453236d..85d9e37f5ccb 100644
--- a/arch/powerpc/sysdev/ppc4xx_ocm.c
+++ b/arch/powerpc/sysdev/ppc4xx_ocm.c
@@ -339,7 +339,7 @@ void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align,
339 if (IS_ERR_VALUE(offset)) 339 if (IS_ERR_VALUE(offset))
340 continue; 340 continue;
341 341
342 ocm_blk = kzalloc(sizeof(struct ocm_block *), GFP_KERNEL); 342 ocm_blk = kzalloc(sizeof(struct ocm_block), GFP_KERNEL);
343 if (!ocm_blk) { 343 if (!ocm_blk) {
344 printk(KERN_ERR "PPC4XX OCM: could not allocate ocm block"); 344 printk(KERN_ERR "PPC4XX OCM: could not allocate ocm block");
345 rh_free(ocm_reg->rh, offset); 345 rh_free(ocm_reg->rh, offset);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 314fced4fc14..1e1a03d2d19f 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -101,7 +101,7 @@ config S390
101 select GENERIC_CPU_DEVICES if !SMP 101 select GENERIC_CPU_DEVICES if !SMP
102 select GENERIC_FIND_FIRST_BIT 102 select GENERIC_FIND_FIRST_BIT
103 select GENERIC_SMP_IDLE_THREAD 103 select GENERIC_SMP_IDLE_THREAD
104 select GENERIC_TIME_VSYSCALL_OLD 104 select GENERIC_TIME_VSYSCALL
105 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 105 select HAVE_ALIGNED_STRUCT_PAGE if SLUB
106 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 106 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
107 select HAVE_ARCH_SECCOMP_FILTER 107 select HAVE_ARCH_SECCOMP_FILTER
@@ -347,14 +347,14 @@ config SMP
347 Even if you don't know what to do here, say Y. 347 Even if you don't know what to do here, say Y.
348 348
349config NR_CPUS 349config NR_CPUS
350 int "Maximum number of CPUs (2-64)" 350 int "Maximum number of CPUs (2-256)"
351 range 2 64 351 range 2 256
352 depends on SMP 352 depends on SMP
353 default "32" if !64BIT 353 default "32" if !64BIT
354 default "64" if 64BIT 354 default "64" if 64BIT
355 help 355 help
356 This allows you to specify the maximum number of CPUs which this 356 This allows you to specify the maximum number of CPUs which this
357 kernel will support. The maximum supported value is 64 and the 357 kernel will support. The maximum supported value is 256 and the
358 minimum value which makes sense is 2. 358 minimum value which makes sense is 2.
359 359
360 This is purely to save memory - each supported CPU adds 360 This is purely to save memory - each supported CPU adds
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index f2737a005afc..9a42ecec5647 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -21,6 +21,6 @@ $(obj)/bzImage: $(obj)/compressed/vmlinux FORCE
21$(obj)/compressed/vmlinux: FORCE 21$(obj)/compressed/vmlinux: FORCE
22 $(Q)$(MAKE) $(build)=$(obj)/compressed $@ 22 $(Q)$(MAKE) $(build)=$(obj)/compressed $@
23 23
24install: $(CONFIGURE) $(obj)/image 24install: $(CONFIGURE) $(obj)/bzImage
25 sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \ 25 sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
26 System.map "$(INSTALL_PATH)" 26 System.map "$(INSTALL_PATH)"
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 46cae138ece2..b3feabd39f31 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -35,7 +35,6 @@ static u8 *ctrblk;
35static char keylen_flag; 35static char keylen_flag;
36 36
37struct s390_aes_ctx { 37struct s390_aes_ctx {
38 u8 iv[AES_BLOCK_SIZE];
39 u8 key[AES_MAX_KEY_SIZE]; 38 u8 key[AES_MAX_KEY_SIZE];
40 long enc; 39 long enc;
41 long dec; 40 long dec;
@@ -56,8 +55,7 @@ struct pcc_param {
56 55
57struct s390_xts_ctx { 56struct s390_xts_ctx {
58 u8 key[32]; 57 u8 key[32];
59 u8 xts_param[16]; 58 u8 pcc_key[32];
60 struct pcc_param pcc;
61 long enc; 59 long enc;
62 long dec; 60 long dec;
63 int key_len; 61 int key_len;
@@ -441,30 +439,36 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
441 return aes_set_key(tfm, in_key, key_len); 439 return aes_set_key(tfm, in_key, key_len);
442} 440}
443 441
444static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param, 442static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
445 struct blkcipher_walk *walk) 443 struct blkcipher_walk *walk)
446{ 444{
445 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
447 int ret = blkcipher_walk_virt(desc, walk); 446 int ret = blkcipher_walk_virt(desc, walk);
448 unsigned int nbytes = walk->nbytes; 447 unsigned int nbytes = walk->nbytes;
448 struct {
449 u8 iv[AES_BLOCK_SIZE];
450 u8 key[AES_MAX_KEY_SIZE];
451 } param;
449 452
450 if (!nbytes) 453 if (!nbytes)
451 goto out; 454 goto out;
452 455
453 memcpy(param, walk->iv, AES_BLOCK_SIZE); 456 memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
457 memcpy(param.key, sctx->key, sctx->key_len);
454 do { 458 do {
455 /* only use complete blocks */ 459 /* only use complete blocks */
456 unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1); 460 unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
457 u8 *out = walk->dst.virt.addr; 461 u8 *out = walk->dst.virt.addr;
458 u8 *in = walk->src.virt.addr; 462 u8 *in = walk->src.virt.addr;
459 463
460 ret = crypt_s390_kmc(func, param, out, in, n); 464 ret = crypt_s390_kmc(func, &param, out, in, n);
461 if (ret < 0 || ret != n) 465 if (ret < 0 || ret != n)
462 return -EIO; 466 return -EIO;
463 467
464 nbytes &= AES_BLOCK_SIZE - 1; 468 nbytes &= AES_BLOCK_SIZE - 1;
465 ret = blkcipher_walk_done(desc, walk, nbytes); 469 ret = blkcipher_walk_done(desc, walk, nbytes);
466 } while ((nbytes = walk->nbytes)); 470 } while ((nbytes = walk->nbytes));
467 memcpy(walk->iv, param, AES_BLOCK_SIZE); 471 memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
468 472
469out: 473out:
470 return ret; 474 return ret;
@@ -481,7 +485,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
481 return fallback_blk_enc(desc, dst, src, nbytes); 485 return fallback_blk_enc(desc, dst, src, nbytes);
482 486
483 blkcipher_walk_init(&walk, dst, src, nbytes); 487 blkcipher_walk_init(&walk, dst, src, nbytes);
484 return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk); 488 return cbc_aes_crypt(desc, sctx->enc, &walk);
485} 489}
486 490
487static int cbc_aes_decrypt(struct blkcipher_desc *desc, 491static int cbc_aes_decrypt(struct blkcipher_desc *desc,
@@ -495,7 +499,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
495 return fallback_blk_dec(desc, dst, src, nbytes); 499 return fallback_blk_dec(desc, dst, src, nbytes);
496 500
497 blkcipher_walk_init(&walk, dst, src, nbytes); 501 blkcipher_walk_init(&walk, dst, src, nbytes);
498 return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk); 502 return cbc_aes_crypt(desc, sctx->dec, &walk);
499} 503}
500 504
501static struct crypto_alg cbc_aes_alg = { 505static struct crypto_alg cbc_aes_alg = {
@@ -586,7 +590,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
586 xts_ctx->enc = KM_XTS_128_ENCRYPT; 590 xts_ctx->enc = KM_XTS_128_ENCRYPT;
587 xts_ctx->dec = KM_XTS_128_DECRYPT; 591 xts_ctx->dec = KM_XTS_128_DECRYPT;
588 memcpy(xts_ctx->key + 16, in_key, 16); 592 memcpy(xts_ctx->key + 16, in_key, 16);
589 memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16); 593 memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
590 break; 594 break;
591 case 48: 595 case 48:
592 xts_ctx->enc = 0; 596 xts_ctx->enc = 0;
@@ -597,7 +601,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
597 xts_ctx->enc = KM_XTS_256_ENCRYPT; 601 xts_ctx->enc = KM_XTS_256_ENCRYPT;
598 xts_ctx->dec = KM_XTS_256_DECRYPT; 602 xts_ctx->dec = KM_XTS_256_DECRYPT;
599 memcpy(xts_ctx->key, in_key, 32); 603 memcpy(xts_ctx->key, in_key, 32);
600 memcpy(xts_ctx->pcc.key, in_key + 32, 32); 604 memcpy(xts_ctx->pcc_key, in_key + 32, 32);
601 break; 605 break;
602 default: 606 default:
603 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 607 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
@@ -616,29 +620,33 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
616 unsigned int nbytes = walk->nbytes; 620 unsigned int nbytes = walk->nbytes;
617 unsigned int n; 621 unsigned int n;
618 u8 *in, *out; 622 u8 *in, *out;
619 void *param; 623 struct pcc_param pcc_param;
624 struct {
625 u8 key[32];
626 u8 init[16];
627 } xts_param;
620 628
621 if (!nbytes) 629 if (!nbytes)
622 goto out; 630 goto out;
623 631
624 memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block)); 632 memset(pcc_param.block, 0, sizeof(pcc_param.block));
625 memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit)); 633 memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
626 memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts)); 634 memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
627 memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak)); 635 memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
628 param = xts_ctx->pcc.key + offset; 636 memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
629 ret = crypt_s390_pcc(func, param); 637 ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
630 if (ret < 0) 638 if (ret < 0)
631 return -EIO; 639 return -EIO;
632 640
633 memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16); 641 memcpy(xts_param.key, xts_ctx->key, 32);
634 param = xts_ctx->key + offset; 642 memcpy(xts_param.init, pcc_param.xts, 16);
635 do { 643 do {
636 /* only use complete blocks */ 644 /* only use complete blocks */
637 n = nbytes & ~(AES_BLOCK_SIZE - 1); 645 n = nbytes & ~(AES_BLOCK_SIZE - 1);
638 out = walk->dst.virt.addr; 646 out = walk->dst.virt.addr;
639 in = walk->src.virt.addr; 647 in = walk->src.virt.addr;
640 648
641 ret = crypt_s390_km(func, param, out, in, n); 649 ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
642 if (ret < 0 || ret != n) 650 if (ret < 0 || ret != n)
643 return -EIO; 651 return -EIO;
644 652
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index 9b69c0befdca..4e63f1a13600 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -7,6 +7,8 @@
7#ifndef __ASM_CTL_REG_H 7#ifndef __ASM_CTL_REG_H
8#define __ASM_CTL_REG_H 8#define __ASM_CTL_REG_H
9 9
10#include <linux/bug.h>
11
10#ifdef CONFIG_64BIT 12#ifdef CONFIG_64BIT
11# define __CTL_LOAD "lctlg" 13# define __CTL_LOAD "lctlg"
12# define __CTL_STORE "stctg" 14# define __CTL_STORE "stctg"
diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h
index dc9200ca32ed..67026300c88e 100644
--- a/arch/s390/include/asm/eadm.h
+++ b/arch/s390/include/asm/eadm.h
@@ -111,18 +111,7 @@ struct scm_driver {
111int scm_driver_register(struct scm_driver *scmdrv); 111int scm_driver_register(struct scm_driver *scmdrv);
112void scm_driver_unregister(struct scm_driver *scmdrv); 112void scm_driver_unregister(struct scm_driver *scmdrv);
113 113
114int scm_start_aob(struct aob *aob); 114int eadm_start_aob(struct aob *aob);
115void scm_irq_handler(struct aob *aob, int error); 115void scm_irq_handler(struct aob *aob, int error);
116 116
117struct eadm_ops {
118 int (*eadm_start) (struct aob *aob);
119 struct module *owner;
120};
121
122int scm_get_ref(void);
123void scm_put_ref(void);
124
125void register_eadm_ops(struct eadm_ops *ops);
126void unregister_eadm_ops(struct eadm_ops *ops);
127
128#endif /* _ASM_S390_EADM_H */ 117#endif /* _ASM_S390_EADM_H */
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
index a908d2941c5d..b7eabaaeffbd 100644
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -18,8 +18,6 @@
18#define __ARCH_HAS_DO_SOFTIRQ 18#define __ARCH_HAS_DO_SOFTIRQ
19#define __ARCH_IRQ_EXIT_IRQS_DISABLED 19#define __ARCH_IRQ_EXIT_IRQS_DISABLED
20 20
21#define HARDIRQ_BITS 8
22
23static inline void ack_bad_irq(unsigned int irq) 21static inline void ack_bad_irq(unsigned int irq)
24{ 22{
25 printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq); 23 printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 316c8503a3b4..114258eeaacd 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -48,33 +48,21 @@ static inline void clear_page(void *page)
48 : "memory", "cc"); 48 : "memory", "cc");
49} 49}
50 50
51/*
52 * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
53 * bypass caches when copying a page. Especially when copying huge pages
54 * this keeps L1 and L2 data caches alive.
55 */
51static inline void copy_page(void *to, void *from) 56static inline void copy_page(void *to, void *from)
52{ 57{
53 if (MACHINE_HAS_MVPG) { 58 register void *reg2 asm ("2") = to;
54 register unsigned long reg0 asm ("0") = 0; 59 register unsigned long reg3 asm ("3") = 0x1000;
55 asm volatile( 60 register void *reg4 asm ("4") = from;
56 " mvpg %0,%1" 61 register unsigned long reg5 asm ("5") = 0xb0001000;
57 : : "a" (to), "a" (from), "d" (reg0) 62 asm volatile(
58 : "memory", "cc"); 63 " mvcl 2,4"
59 } else 64 : "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5)
60 asm volatile( 65 : : "memory", "cc");
61 " mvc 0(256,%0),0(%1)\n"
62 " mvc 256(256,%0),256(%1)\n"
63 " mvc 512(256,%0),512(%1)\n"
64 " mvc 768(256,%0),768(%1)\n"
65 " mvc 1024(256,%0),1024(%1)\n"
66 " mvc 1280(256,%0),1280(%1)\n"
67 " mvc 1536(256,%0),1536(%1)\n"
68 " mvc 1792(256,%0),1792(%1)\n"
69 " mvc 2048(256,%0),2048(%1)\n"
70 " mvc 2304(256,%0),2304(%1)\n"
71 " mvc 2560(256,%0),2560(%1)\n"
72 " mvc 2816(256,%0),2816(%1)\n"
73 " mvc 3072(256,%0),3072(%1)\n"
74 " mvc 3328(256,%0),3328(%1)\n"
75 " mvc 3584(256,%0),3584(%1)\n"
76 " mvc 3840(256,%0),3840(%1)\n"
77 : : "a" (to), "a" (from) : "memory");
78} 66}
79 67
80#define clear_user_page(page, vaddr, pg) clear_page(page) 68#define clear_user_page(page, vaddr, pg) clear_page(page)
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 1cc185da9d38..c129ab2ac731 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -63,9 +63,10 @@ enum zpci_state {
63}; 63};
64 64
65struct zpci_bar_struct { 65struct zpci_bar_struct {
66 struct resource *res; /* bus resource */
66 u32 val; /* bar start & 3 flag bits */ 67 u32 val; /* bar start & 3 flag bits */
67 u8 size; /* order 2 exponent */
68 u16 map_idx; /* index into bar mapping array */ 68 u16 map_idx; /* index into bar mapping array */
69 u8 size; /* order 2 exponent */
69}; 70};
70 71
71/* Private data per function */ 72/* Private data per function */
@@ -97,6 +98,7 @@ struct zpci_dev {
97 unsigned long iommu_pages; 98 unsigned long iommu_pages;
98 unsigned int next_bit; 99 unsigned int next_bit;
99 100
101 char res_name[16];
100 struct zpci_bar_struct bars[PCI_BAR_COUNT]; 102 struct zpci_bar_struct bars[PCI_BAR_COUNT];
101 103
102 u64 start_dma; /* Start of available DMA addresses */ 104 u64 start_dma; /* Start of available DMA addresses */
@@ -122,12 +124,10 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
122 Prototypes 124 Prototypes
123----------------------------------------------------------------------------- */ 125----------------------------------------------------------------------------- */
124/* Base stuff */ 126/* Base stuff */
125struct zpci_dev *zpci_alloc_device(void);
126int zpci_create_device(struct zpci_dev *); 127int zpci_create_device(struct zpci_dev *);
127int zpci_enable_device(struct zpci_dev *); 128int zpci_enable_device(struct zpci_dev *);
128int zpci_disable_device(struct zpci_dev *); 129int zpci_disable_device(struct zpci_dev *);
129void zpci_stop_device(struct zpci_dev *); 130void zpci_stop_device(struct zpci_dev *);
130void zpci_free_device(struct zpci_dev *);
131int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64); 131int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
132int zpci_unregister_ioat(struct zpci_dev *, u8); 132int zpci_unregister_ioat(struct zpci_dev *, u8);
133 133
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 7dc7f9c63b65..2f390956c7c1 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -8,6 +8,7 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <asm/chpid.h> 10#include <asm/chpid.h>
11#include <asm/cpu.h>
11 12
12#define SCLP_CHP_INFO_MASK_SIZE 32 13#define SCLP_CHP_INFO_MASK_SIZE 32
13 14
@@ -37,13 +38,12 @@ struct sclp_cpu_info {
37 unsigned int standby; 38 unsigned int standby;
38 unsigned int combined; 39 unsigned int combined;
39 int has_cpu_type; 40 int has_cpu_type;
40 struct sclp_cpu_entry cpu[255]; 41 struct sclp_cpu_entry cpu[MAX_CPU_ADDRESS + 1];
41}; 42};
42 43
43int sclp_get_cpu_info(struct sclp_cpu_info *info); 44int sclp_get_cpu_info(struct sclp_cpu_info *info);
44int sclp_cpu_configure(u8 cpu); 45int sclp_cpu_configure(u8 cpu);
45int sclp_cpu_deconfigure(u8 cpu); 46int sclp_cpu_deconfigure(u8 cpu);
46void sclp_facilities_detect(void);
47unsigned long long sclp_get_rnmax(void); 47unsigned long long sclp_get_rnmax(void);
48unsigned long long sclp_get_rzm(void); 48unsigned long long sclp_get_rzm(void);
49int sclp_sdias_blk_count(void); 49int sclp_sdias_blk_count(void);
@@ -57,5 +57,7 @@ bool sclp_has_vt220(void);
57int sclp_pci_configure(u32 fid); 57int sclp_pci_configure(u32 fid);
58int sclp_pci_deconfigure(u32 fid); 58int sclp_pci_deconfigure(u32 fid);
59int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode); 59int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
60unsigned long sclp_get_hsa_size(void);
61void sclp_early_detect(void);
60 62
61#endif /* _ASM_S390_SCLP_H */ 63#endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index df802ee14af6..94cfbe442f12 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -107,9 +107,6 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
107#define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM) 107#define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM)
108#endif /* CONFIG_64BIT */ 108#endif /* CONFIG_64BIT */
109 109
110#define ZFCPDUMP_HSA_SIZE (32UL<<20)
111#define ZFCPDUMP_HSA_SIZE_MAX (64UL<<20)
112
113/* 110/*
114 * Console mode. Override with conmode= 111 * Console mode. Override with conmode=
115 */ 112 */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index eb5f64d26d06..10e0fcd3633d 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -111,6 +111,4 @@ static inline struct thread_info *current_thread_info(void)
111#define is_32bit_task() (1) 111#define is_32bit_task() (1)
112#endif 112#endif
113 113
114#define PREEMPT_ACTIVE 0x4000000
115
116#endif /* _ASM_THREAD_INFO_H */ 114#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index a73eb2e1e918..bc9746a7d47c 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -26,8 +26,9 @@ struct vdso_data {
26 __u64 wtom_clock_nsec; /* 0x28 */ 26 __u64 wtom_clock_nsec; /* 0x28 */
27 __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */ 27 __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */
28 __u32 tz_dsttime; /* Type of dst correction 0x34 */ 28 __u32 tz_dsttime; /* Type of dst correction 0x34 */
29 __u32 ectg_available; 29 __u32 ectg_available; /* ECTG instruction present 0x38 */
30 __u32 ntp_mult; /* NTP adjusted multiplier 0x3C */ 30 __u32 tk_mult; /* Mult. used for xtime_nsec 0x3c */
31 __u32 tk_shift; /* Shift used for xtime_nsec 0x40 */
31}; 32};
32 33
33struct vdso_per_cpu_data { 34struct vdso_per_cpu_data {
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 2416138ebd3e..e4c99a183651 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -65,12 +65,14 @@ int main(void)
65 DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); 65 DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
66 DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); 66 DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
67 DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); 67 DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
68 DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult)); 68 DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult));
69 DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data, tk_shift));
69 DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base)); 70 DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
70 DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time)); 71 DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
71 /* constants used by the vdso */ 72 /* constants used by the vdso */
72 DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME); 73 DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
73 DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC); 74 DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
75 DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
74 DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); 76 DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
75 BLANK(); 77 BLANK();
76 /* idle data offsets */ 78 /* idle data offsets */
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 6e2442978409..95e7ba0fbb7e 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -194,7 +194,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
194 return -EINVAL; 194 return -EINVAL;
195 195
196 /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */ 196 /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
197 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | 197 regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
198 (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 | 198 (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
199 (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 | 199 (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 |
200 (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE); 200 (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index f45b2ab0cb81..d7658c4b2ed5 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -95,7 +95,7 @@ static void *elfcorehdr_newmem;
95/* 95/*
96 * Copy one page from zfcpdump "oldmem" 96 * Copy one page from zfcpdump "oldmem"
97 * 97 *
98 * For pages below ZFCPDUMP_HSA_SIZE memory from the HSA is copied. Otherwise 98 * For pages below HSA size memory from the HSA is copied. Otherwise
99 * real memory copy is used. 99 * real memory copy is used.
100 */ 100 */
101static ssize_t copy_oldmem_page_zfcpdump(char *buf, size_t csize, 101static ssize_t copy_oldmem_page_zfcpdump(char *buf, size_t csize,
@@ -103,7 +103,7 @@ static ssize_t copy_oldmem_page_zfcpdump(char *buf, size_t csize,
103{ 103{
104 int rc; 104 int rc;
105 105
106 if (src < ZFCPDUMP_HSA_SIZE) { 106 if (src < sclp_get_hsa_size()) {
107 rc = memcpy_hsa(buf, src, csize, userbuf); 107 rc = memcpy_hsa(buf, src, csize, userbuf);
108 } else { 108 } else {
109 if (userbuf) 109 if (userbuf)
@@ -188,18 +188,19 @@ static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma,
188/* 188/*
189 * Remap "oldmem" for zfcpdump 189 * Remap "oldmem" for zfcpdump
190 * 190 *
191 * We only map available memory above ZFCPDUMP_HSA_SIZE. Memory below 191 * We only map available memory above HSA size. Memory below HSA size
192 * ZFCPDUMP_HSA_SIZE is read on demand using the copy_oldmem_page() function. 192 * is read on demand using the copy_oldmem_page() function.
193 */ 193 */
194static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma, 194static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
195 unsigned long from, 195 unsigned long from,
196 unsigned long pfn, 196 unsigned long pfn,
197 unsigned long size, pgprot_t prot) 197 unsigned long size, pgprot_t prot)
198{ 198{
199 unsigned long hsa_end = sclp_get_hsa_size();
199 unsigned long size_hsa; 200 unsigned long size_hsa;
200 201
201 if (pfn < ZFCPDUMP_HSA_SIZE >> PAGE_SHIFT) { 202 if (pfn < hsa_end >> PAGE_SHIFT) {
202 size_hsa = min(size, ZFCPDUMP_HSA_SIZE - (pfn << PAGE_SHIFT)); 203 size_hsa = min(size, hsa_end - (pfn << PAGE_SHIFT));
203 if (size == size_hsa) 204 if (size == size_hsa)
204 return 0; 205 return 0;
205 size -= size_hsa; 206 size -= size_hsa;
@@ -238,9 +239,9 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
238 return rc; 239 return rc;
239 } 240 }
240 } else { 241 } else {
241 if ((unsigned long) src < ZFCPDUMP_HSA_SIZE) { 242 unsigned long hsa_end = sclp_get_hsa_size();
242 copied = min(count, 243 if ((unsigned long) src < hsa_end) {
243 ZFCPDUMP_HSA_SIZE - (unsigned long) src); 244 copied = min(count, hsa_end - (unsigned long) src);
244 rc = memcpy_hsa(dest, (unsigned long) src, copied, 0); 245 rc = memcpy_hsa(dest, (unsigned long) src, copied, 0);
245 if (rc) 246 if (rc)
246 return rc; 247 return rc;
@@ -580,6 +581,9 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
580 /* If elfcorehdr= has been passed via cmdline, we use that one */ 581 /* If elfcorehdr= has been passed via cmdline, we use that one */
581 if (elfcorehdr_addr != ELFCORE_ADDR_MAX) 582 if (elfcorehdr_addr != ELFCORE_ADDR_MAX)
582 return 0; 583 return 0;
584 /* If we cannot get HSA size for zfcpdump return error */
585 if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp_get_hsa_size())
586 return -ENODEV;
583 mem_chunk_cnt = get_mem_chunk_cnt(); 587 mem_chunk_cnt = get_mem_chunk_cnt();
584 588
585 alloc_size = 0x1000 + get_cpu_cnt() * 0x300 + 589 alloc_size = 0x1000 + get_cpu_cnt() * 0x300 +
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 96543ac400a7..fca20b5fe79e 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -483,7 +483,7 @@ void __init startup_init(void)
483 detect_diag44(); 483 detect_diag44();
484 detect_machine_facilities(); 484 detect_machine_facilities();
485 setup_topology(); 485 setup_topology();
486 sclp_facilities_detect(); 486 sclp_early_detect();
487#ifdef CONFIG_DYNAMIC_FTRACE 487#ifdef CONFIG_DYNAMIC_FTRACE
488 S390_lowcore.ftrace_func = (unsigned long)ftrace_caller; 488 S390_lowcore.ftrace_func = (unsigned long)ftrace_caller;
489#endif 489#endif
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index 4a460c44e17e..813ec7260878 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -78,7 +78,7 @@ PGM_CHECK_DEFAULT /* 34 */
78PGM_CHECK_DEFAULT /* 35 */ 78PGM_CHECK_DEFAULT /* 35 */
79PGM_CHECK_DEFAULT /* 36 */ 79PGM_CHECK_DEFAULT /* 36 */
80PGM_CHECK_DEFAULT /* 37 */ 80PGM_CHECK_DEFAULT /* 37 */
81PGM_CHECK_DEFAULT /* 38 */ 81PGM_CHECK_64BIT(do_dat_exception) /* 38 */
82PGM_CHECK_64BIT(do_dat_exception) /* 39 */ 82PGM_CHECK_64BIT(do_dat_exception) /* 39 */
83PGM_CHECK_64BIT(do_dat_exception) /* 3a */ 83PGM_CHECK_64BIT(do_dat_exception) /* 3a */
84PGM_CHECK_64BIT(do_dat_exception) /* 3b */ 84PGM_CHECK_64BIT(do_dat_exception) /* 3b */
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index ffe1c53264a7..4444875266ee 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -471,8 +471,9 @@ static void __init setup_memory_end(void)
471 471
472 472
473#ifdef CONFIG_ZFCPDUMP 473#ifdef CONFIG_ZFCPDUMP
474 if (ipl_info.type == IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) { 474 if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
475 memory_end = ZFCPDUMP_HSA_SIZE; 475 !OLDMEM_BASE && sclp_get_hsa_size()) {
476 memory_end = sclp_get_hsa_size();
476 memory_end_set = 1; 477 memory_end_set = 1;
477 } 478 }
478#endif 479#endif
@@ -586,7 +587,7 @@ static unsigned long __init find_crash_base(unsigned long crash_size,
586 crash_base = (chunk->addr + chunk->size) - crash_size; 587 crash_base = (chunk->addr + chunk->size) - crash_size;
587 if (crash_base < crash_size) 588 if (crash_base < crash_size)
588 continue; 589 continue;
589 if (crash_base < ZFCPDUMP_HSA_SIZE_MAX) 590 if (crash_base < sclp_get_hsa_size())
590 continue; 591 continue;
591 if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE) 592 if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE)
592 continue; 593 continue;
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index fb535874a246..d8fd508ccd1e 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -94,7 +94,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
94 return -EINVAL; 94 return -EINVAL;
95 95
96 /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */ 96 /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
97 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | 97 regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
98 (user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI)); 98 (user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
99 /* Check for invalid user address space control. */ 99 /* Check for invalid user address space control. */
100 if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME) 100 if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 064c3082ab33..dd95f1631621 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -108,20 +108,10 @@ static void fixup_clock_comparator(unsigned long long delta)
108 set_clock_comparator(S390_lowcore.clock_comparator); 108 set_clock_comparator(S390_lowcore.clock_comparator);
109} 109}
110 110
111static int s390_next_ktime(ktime_t expires, 111static int s390_next_event(unsigned long delta,
112 struct clock_event_device *evt) 112 struct clock_event_device *evt)
113{ 113{
114 struct timespec ts; 114 S390_lowcore.clock_comparator = get_tod_clock() + delta;
115 u64 nsecs;
116
117 ts.tv_sec = ts.tv_nsec = 0;
118 monotonic_to_bootbased(&ts);
119 nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
120 do_div(nsecs, 125);
121 S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
122 /* Program the maximum value if we have an overflow (== year 2042) */
123 if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
124 S390_lowcore.clock_comparator = -1ULL;
125 set_clock_comparator(S390_lowcore.clock_comparator); 115 set_clock_comparator(S390_lowcore.clock_comparator);
126 return 0; 116 return 0;
127} 117}
@@ -146,15 +136,14 @@ void init_cpu_timer(void)
146 cpu = smp_processor_id(); 136 cpu = smp_processor_id();
147 cd = &per_cpu(comparators, cpu); 137 cd = &per_cpu(comparators, cpu);
148 cd->name = "comparator"; 138 cd->name = "comparator";
149 cd->features = CLOCK_EVT_FEAT_ONESHOT | 139 cd->features = CLOCK_EVT_FEAT_ONESHOT;
150 CLOCK_EVT_FEAT_KTIME;
151 cd->mult = 16777; 140 cd->mult = 16777;
152 cd->shift = 12; 141 cd->shift = 12;
153 cd->min_delta_ns = 1; 142 cd->min_delta_ns = 1;
154 cd->max_delta_ns = LONG_MAX; 143 cd->max_delta_ns = LONG_MAX;
155 cd->rating = 400; 144 cd->rating = 400;
156 cd->cpumask = cpumask_of(cpu); 145 cd->cpumask = cpumask_of(cpu);
157 cd->set_next_ktime = s390_next_ktime; 146 cd->set_next_event = s390_next_event;
158 cd->set_mode = s390_set_mode; 147 cd->set_mode = s390_set_mode;
159 148
160 clockevents_register_device(cd); 149 clockevents_register_device(cd);
@@ -221,21 +210,30 @@ struct clocksource * __init clocksource_default_clock(void)
221 return &clocksource_tod; 210 return &clocksource_tod;
222} 211}
223 212
224void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm, 213void update_vsyscall(struct timekeeper *tk)
225 struct clocksource *clock, u32 mult)
226{ 214{
227 if (clock != &clocksource_tod) 215 u64 nsecps;
216
217 if (tk->clock != &clocksource_tod)
228 return; 218 return;
229 219
230 /* Make userspace gettimeofday spin until we're done. */ 220 /* Make userspace gettimeofday spin until we're done. */
231 ++vdso_data->tb_update_count; 221 ++vdso_data->tb_update_count;
232 smp_wmb(); 222 smp_wmb();
233 vdso_data->xtime_tod_stamp = clock->cycle_last; 223 vdso_data->xtime_tod_stamp = tk->clock->cycle_last;
234 vdso_data->xtime_clock_sec = wall_time->tv_sec; 224 vdso_data->xtime_clock_sec = tk->xtime_sec;
235 vdso_data->xtime_clock_nsec = wall_time->tv_nsec; 225 vdso_data->xtime_clock_nsec = tk->xtime_nsec;
236 vdso_data->wtom_clock_sec = wtm->tv_sec; 226 vdso_data->wtom_clock_sec =
237 vdso_data->wtom_clock_nsec = wtm->tv_nsec; 227 tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
238 vdso_data->ntp_mult = mult; 228 vdso_data->wtom_clock_nsec = tk->xtime_nsec +
229 + (tk->wall_to_monotonic.tv_nsec << tk->shift);
230 nsecps = (u64) NSEC_PER_SEC << tk->shift;
231 while (vdso_data->wtom_clock_nsec >= nsecps) {
232 vdso_data->wtom_clock_nsec -= nsecps;
233 vdso_data->wtom_clock_sec++;
234 }
235 vdso_data->tk_mult = tk->mult;
236 vdso_data->tk_shift = tk->shift;
239 smp_wmb(); 237 smp_wmb();
240 ++vdso_data->tb_update_count; 238 ++vdso_data->tb_update_count;
241} 239}
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index a84476f2a9bb..613649096783 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -125,7 +125,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
125 psal[i] = 0x80000000; 125 psal[i] = 0x80000000;
126 126
127 lowcore->paste[4] = (u32)(addr_t) psal; 127 lowcore->paste[4] = (u32)(addr_t) psal;
128 psal[0] = 0x20000000; 128 psal[0] = 0x02000000;
129 psal[2] = (u32)(addr_t) aste; 129 psal[2] = (u32)(addr_t) aste;
130 *(unsigned long *) (aste + 2) = segment_table + 130 *(unsigned long *) (aste + 2) = segment_table +
131 _ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT; 131 _ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index b2224e0b974c..65fc3979c2f1 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -38,25 +38,21 @@ __kernel_clock_gettime:
38 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 38 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
39 brc 3,2f 39 brc 3,2f
40 ahi %r0,-1 40 ahi %r0,-1
412: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ 412: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
42 lr %r2,%r0 42 lr %r2,%r0
43 l %r0,__VDSO_NTP_MULT(%r5) 43 l %r0,__VDSO_TK_MULT(%r5)
44 ltr %r1,%r1 44 ltr %r1,%r1
45 mr %r0,%r0 45 mr %r0,%r0
46 jnm 3f 46 jnm 3f
47 a %r0,__VDSO_NTP_MULT(%r5) 47 a %r0,__VDSO_TK_MULT(%r5)
483: alr %r0,%r2 483: alr %r0,%r2
49 srdl %r0,12 49 al %r0,__VDSO_WTOM_NSEC(%r5)
50 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
51 al %r1,__VDSO_XTIME_NSEC+4(%r5)
52 brc 12,4f
53 ahi %r0,1
544: l %r2,__VDSO_XTIME_SEC+4(%r5)
55 al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */
56 al %r1,__VDSO_WTOM_NSEC+4(%r5) 50 al %r1,__VDSO_WTOM_NSEC+4(%r5)
57 brc 12,5f 51 brc 12,5f
58 ahi %r0,1 52 ahi %r0,1
595: al %r2,__VDSO_WTOM_SEC+4(%r5) 535: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
54 srdl %r0,0(%r2) /* >> tk->shift */
55 l %r2,__VDSO_WTOM_SEC+4(%r5)
60 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ 56 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
61 jne 1b 57 jne 1b
62 basr %r5,0 58 basr %r5,0
@@ -86,20 +82,21 @@ __kernel_clock_gettime:
86 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 82 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
87 brc 3,12f 83 brc 3,12f
88 ahi %r0,-1 84 ahi %r0,-1
8912: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ 8512: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
90 lr %r2,%r0 86 lr %r2,%r0
91 l %r0,__VDSO_NTP_MULT(%r5) 87 l %r0,__VDSO_TK_MULT(%r5)
92 ltr %r1,%r1 88 ltr %r1,%r1
93 mr %r0,%r0 89 mr %r0,%r0
94 jnm 13f 90 jnm 13f
95 a %r0,__VDSO_NTP_MULT(%r5) 91 a %r0,__VDSO_TK_MULT(%r5)
9613: alr %r0,%r2 9213: alr %r0,%r2
97 srdl %r0,12 93 al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
98 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
99 al %r1,__VDSO_XTIME_NSEC+4(%r5) 94 al %r1,__VDSO_XTIME_NSEC+4(%r5)
100 brc 12,14f 95 brc 12,14f
101 ahi %r0,1 96 ahi %r0,1
10214: l %r2,__VDSO_XTIME_SEC+4(%r5) 9714: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
98 srdl %r0,0(%r2) /* >> tk->shift */
99 l %r2,__VDSO_XTIME_SEC+4(%r5)
103 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ 100 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
104 jne 11b 101 jne 11b
105 basr %r5,0 102 basr %r5,0
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index 2d3633175e3b..fd621a950f7c 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -35,15 +35,14 @@ __kernel_gettimeofday:
35 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 35 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
36 brc 3,3f 36 brc 3,3f
37 ahi %r0,-1 37 ahi %r0,-1
383: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ 383: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
39 st %r0,24(%r15) 39 st %r0,24(%r15)
40 l %r0,__VDSO_NTP_MULT(%r5) 40 l %r0,__VDSO_TK_MULT(%r5)
41 ltr %r1,%r1 41 ltr %r1,%r1
42 mr %r0,%r0 42 mr %r0,%r0
43 jnm 4f 43 jnm 4f
44 a %r0,__VDSO_NTP_MULT(%r5) 44 a %r0,__VDSO_TK_MULT(%r5)
454: al %r0,24(%r15) 454: al %r0,24(%r15)
46 srdl %r0,12
47 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 46 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
48 al %r1,__VDSO_XTIME_NSEC+4(%r5) 47 al %r1,__VDSO_XTIME_NSEC+4(%r5)
49 brc 12,5f 48 brc 12,5f
@@ -51,6 +50,8 @@ __kernel_gettimeofday:
515: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5) 505: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5)
52 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ 51 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
53 jne 1b 52 jne 1b
53 l %r4,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
54 srdl %r0,0(%r4) /* >> tk->shift */
54 l %r4,24(%r15) /* get tv_sec from stack */ 55 l %r4,24(%r15) /* get tv_sec from stack */
55 basr %r5,0 56 basr %r5,0
566: ltr %r0,%r0 576: ltr %r0,%r0
diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
index 176e1f75f9aa..34deba7c7ed1 100644
--- a/arch/s390/kernel/vdso64/clock_getres.S
+++ b/arch/s390/kernel/vdso64/clock_getres.S
@@ -23,7 +23,9 @@ __kernel_clock_getres:
23 je 0f 23 je 0f
24 cghi %r2,__CLOCK_MONOTONIC 24 cghi %r2,__CLOCK_MONOTONIC
25 je 0f 25 je 0f
26 cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */ 26 cghi %r2,__CLOCK_THREAD_CPUTIME_ID
27 je 0f
28 cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
27 jne 2f 29 jne 2f
28 larl %r5,_vdso_data 30 larl %r5,_vdso_data
29 icm %r0,15,__LC_ECTG_OK(%r5) 31 icm %r0,15,__LC_ECTG_OK(%r5)
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index d46c95ed5f19..91940ed33a4a 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -22,7 +22,9 @@ __kernel_clock_gettime:
22 larl %r5,_vdso_data 22 larl %r5,_vdso_data
23 cghi %r2,__CLOCK_REALTIME 23 cghi %r2,__CLOCK_REALTIME
24 je 4f 24 je 4f
25 cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */ 25 cghi %r2,__CLOCK_THREAD_CPUTIME_ID
26 je 9f
27 cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
26 je 9f 28 je 9f
27 cghi %r2,__CLOCK_MONOTONIC 29 cghi %r2,__CLOCK_MONOTONIC
28 jne 12f 30 jne 12f
@@ -34,14 +36,13 @@ __kernel_clock_gettime:
34 tmll %r4,0x0001 /* pending update ? loop */ 36 tmll %r4,0x0001 /* pending update ? loop */
35 jnz 0b 37 jnz 0b
36 stck 48(%r15) /* Store TOD clock */ 38 stck 48(%r15) /* Store TOD clock */
39 lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
40 lg %r0,__VDSO_WTOM_SEC(%r5)
37 lg %r1,48(%r15) 41 lg %r1,48(%r15)
38 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 42 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
39 msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ 43 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
40 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ 44 alg %r1,__VDSO_WTOM_NSEC(%r5)
41 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 45 srlg %r1,%r1,0(%r2) /* >> tk->shift */
42 lg %r0,__VDSO_XTIME_SEC(%r5)
43 alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */
44 alg %r0,__VDSO_WTOM_SEC(%r5)
45 clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ 46 clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
46 jne 0b 47 jne 0b
47 larl %r5,13f 48 larl %r5,13f
@@ -62,12 +63,13 @@ __kernel_clock_gettime:
62 tmll %r4,0x0001 /* pending update ? loop */ 63 tmll %r4,0x0001 /* pending update ? loop */
63 jnz 5b 64 jnz 5b
64 stck 48(%r15) /* Store TOD clock */ 65 stck 48(%r15) /* Store TOD clock */
66 lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
65 lg %r1,48(%r15) 67 lg %r1,48(%r15)
66 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 68 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
67 msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ 69 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
68 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ 70 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
69 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 71 srlg %r1,%r1,0(%r2) /* >> tk->shift */
70 lg %r0,__VDSO_XTIME_SEC(%r5) 72 lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
71 clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ 73 clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
72 jne 5b 74 jne 5b
73 larl %r5,13f 75 larl %r5,13f
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index 36ee674722ec..d0860d1d0ccc 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -31,12 +31,13 @@ __kernel_gettimeofday:
31 stck 48(%r15) /* Store TOD clock */ 31 stck 48(%r15) /* Store TOD clock */
32 lg %r1,48(%r15) 32 lg %r1,48(%r15)
33 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 33 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
34 msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ 34 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
35 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ 35 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
36 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */ 36 lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
37 lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */
38 clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ 37 clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
39 jne 0b 38 jne 0b
39 lgf %r5,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
40 srlg %r1,%r1,0(%r5) /* >> tk->shift */
40 larl %r5,5f 41 larl %r5,5f
412: clg %r1,0(%r5) 422: clg %r1,0(%r5)
42 jl 3f 43 jl 3f
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 97e03caf7825..dbdab3e7a1a6 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -78,11 +78,14 @@ static size_t copy_in_kernel(size_t count, void __user *to,
78 * contains the (negative) exception code. 78 * contains the (negative) exception code.
79 */ 79 */
80#ifdef CONFIG_64BIT 80#ifdef CONFIG_64BIT
81
81static unsigned long follow_table(struct mm_struct *mm, 82static unsigned long follow_table(struct mm_struct *mm,
82 unsigned long address, int write) 83 unsigned long address, int write)
83{ 84{
84 unsigned long *table = (unsigned long *)__pa(mm->pgd); 85 unsigned long *table = (unsigned long *)__pa(mm->pgd);
85 86
87 if (unlikely(address > mm->context.asce_limit - 1))
88 return -0x38UL;
86 switch (mm->context.asce_bits & _ASCE_TYPE_MASK) { 89 switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
87 case _ASCE_TYPE_REGION1: 90 case _ASCE_TYPE_REGION1:
88 table = table + ((address >> 53) & 0x7ff); 91 table = table + ((address >> 53) & 0x7ff);
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 0c9a17780e4b..bf7c73d71eef 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -530,20 +530,6 @@ static void zpci_unmap_resources(struct zpci_dev *zdev)
530 } 530 }
531} 531}
532 532
533struct zpci_dev *zpci_alloc_device(void)
534{
535 struct zpci_dev *zdev;
536
537 /* Alloc memory for our private pci device data */
538 zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
539 return zdev ? : ERR_PTR(-ENOMEM);
540}
541
542void zpci_free_device(struct zpci_dev *zdev)
543{
544 kfree(zdev);
545}
546
547int pcibios_add_platform_entries(struct pci_dev *pdev) 533int pcibios_add_platform_entries(struct pci_dev *pdev)
548{ 534{
549 return zpci_sysfs_add_device(&pdev->dev); 535 return zpci_sysfs_add_device(&pdev->dev);
@@ -579,37 +565,6 @@ static void zpci_irq_exit(void)
579 unregister_adapter_interrupt(&zpci_airq); 565 unregister_adapter_interrupt(&zpci_airq);
580} 566}
581 567
582static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
583 unsigned long flags, int domain)
584{
585 struct resource *r;
586 char *name;
587 int rc;
588
589 r = kzalloc(sizeof(*r), GFP_KERNEL);
590 if (!r)
591 return ERR_PTR(-ENOMEM);
592 r->start = start;
593 r->end = r->start + size - 1;
594 r->flags = flags;
595 r->parent = &iomem_resource;
596 name = kmalloc(18, GFP_KERNEL);
597 if (!name) {
598 kfree(r);
599 return ERR_PTR(-ENOMEM);
600 }
601 sprintf(name, "PCI Bus: %04x:%02x", domain, ZPCI_BUS_NR);
602 r->name = name;
603
604 rc = request_resource(&iomem_resource, r);
605 if (rc) {
606 kfree(r->name);
607 kfree(r);
608 return ERR_PTR(-ENOMEM);
609 }
610 return r;
611}
612
613static int zpci_alloc_iomap(struct zpci_dev *zdev) 568static int zpci_alloc_iomap(struct zpci_dev *zdev)
614{ 569{
615 int entry; 570 int entry;
@@ -633,6 +588,82 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
633 spin_unlock(&zpci_iomap_lock); 588 spin_unlock(&zpci_iomap_lock);
634} 589}
635 590
591static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
592 unsigned long size, unsigned long flags)
593{
594 struct resource *r;
595
596 r = kzalloc(sizeof(*r), GFP_KERNEL);
597 if (!r)
598 return NULL;
599
600 r->start = start;
601 r->end = r->start + size - 1;
602 r->flags = flags;
603 r->name = zdev->res_name;
604
605 if (request_resource(&iomem_resource, r)) {
606 kfree(r);
607 return NULL;
608 }
609 return r;
610}
611
612static int zpci_setup_bus_resources(struct zpci_dev *zdev,
613 struct list_head *resources)
614{
615 unsigned long addr, size, flags;
616 struct resource *res;
617 int i, entry;
618
619 snprintf(zdev->res_name, sizeof(zdev->res_name),
620 "PCI Bus %04x:%02x", zdev->domain, ZPCI_BUS_NR);
621
622 for (i = 0; i < PCI_BAR_COUNT; i++) {
623 if (!zdev->bars[i].size)
624 continue;
625 entry = zpci_alloc_iomap(zdev);
626 if (entry < 0)
627 return entry;
628 zdev->bars[i].map_idx = entry;
629
630 /* only MMIO is supported */
631 flags = IORESOURCE_MEM;
632 if (zdev->bars[i].val & 8)
633 flags |= IORESOURCE_PREFETCH;
634 if (zdev->bars[i].val & 4)
635 flags |= IORESOURCE_MEM_64;
636
637 addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);
638
639 size = 1UL << zdev->bars[i].size;
640
641 res = __alloc_res(zdev, addr, size, flags);
642 if (!res) {
643 zpci_free_iomap(zdev, entry);
644 return -ENOMEM;
645 }
646 zdev->bars[i].res = res;
647 pci_add_resource(resources, res);
648 }
649
650 return 0;
651}
652
653static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
654{
655 int i;
656
657 for (i = 0; i < PCI_BAR_COUNT; i++) {
658 if (!zdev->bars[i].size)
659 continue;
660
661 zpci_free_iomap(zdev, zdev->bars[i].map_idx);
662 release_resource(zdev->bars[i].res);
663 kfree(zdev->bars[i].res);
664 }
665}
666
636int pcibios_add_device(struct pci_dev *pdev) 667int pcibios_add_device(struct pci_dev *pdev)
637{ 668{
638 struct zpci_dev *zdev = get_zdev(pdev); 669 struct zpci_dev *zdev = get_zdev(pdev);
@@ -729,52 +760,6 @@ struct dev_pm_ops pcibios_pm_ops = {
729}; 760};
730#endif /* CONFIG_HIBERNATE_CALLBACKS */ 761#endif /* CONFIG_HIBERNATE_CALLBACKS */
731 762
732static int zpci_scan_bus(struct zpci_dev *zdev)
733{
734 struct resource *res;
735 LIST_HEAD(resources);
736 int i;
737
738 /* allocate mapping entry for each used bar */
739 for (i = 0; i < PCI_BAR_COUNT; i++) {
740 unsigned long addr, size, flags;
741 int entry;
742
743 if (!zdev->bars[i].size)
744 continue;
745 entry = zpci_alloc_iomap(zdev);
746 if (entry < 0)
747 return entry;
748 zdev->bars[i].map_idx = entry;
749
750 /* only MMIO is supported */
751 flags = IORESOURCE_MEM;
752 if (zdev->bars[i].val & 8)
753 flags |= IORESOURCE_PREFETCH;
754 if (zdev->bars[i].val & 4)
755 flags |= IORESOURCE_MEM_64;
756
757 addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);
758
759 size = 1UL << zdev->bars[i].size;
760
761 res = zpci_alloc_bus_resource(addr, size, flags, zdev->domain);
762 if (IS_ERR(res)) {
763 zpci_free_iomap(zdev, entry);
764 return PTR_ERR(res);
765 }
766 pci_add_resource(&resources, res);
767 }
768
769 zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
770 zdev, &resources);
771 if (!zdev->bus)
772 return -EIO;
773
774 zdev->bus->max_bus_speed = zdev->max_bus_speed;
775 return 0;
776}
777
778static int zpci_alloc_domain(struct zpci_dev *zdev) 763static int zpci_alloc_domain(struct zpci_dev *zdev)
779{ 764{
780 spin_lock(&zpci_domain_lock); 765 spin_lock(&zpci_domain_lock);
@@ -795,6 +780,41 @@ static void zpci_free_domain(struct zpci_dev *zdev)
795 spin_unlock(&zpci_domain_lock); 780 spin_unlock(&zpci_domain_lock);
796} 781}
797 782
783void pcibios_remove_bus(struct pci_bus *bus)
784{
785 struct zpci_dev *zdev = get_zdev_by_bus(bus);
786
787 zpci_exit_slot(zdev);
788 zpci_cleanup_bus_resources(zdev);
789 zpci_free_domain(zdev);
790
791 spin_lock(&zpci_list_lock);
792 list_del(&zdev->entry);
793 spin_unlock(&zpci_list_lock);
794
795 kfree(zdev);
796}
797
798static int zpci_scan_bus(struct zpci_dev *zdev)
799{
800 LIST_HEAD(resources);
801 int ret;
802
803 ret = zpci_setup_bus_resources(zdev, &resources);
804 if (ret)
805 return ret;
806
807 zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
808 zdev, &resources);
809 if (!zdev->bus) {
810 zpci_cleanup_bus_resources(zdev);
811 return -EIO;
812 }
813
814 zdev->bus->max_bus_speed = zdev->max_bus_speed;
815 return 0;
816}
817
798int zpci_enable_device(struct zpci_dev *zdev) 818int zpci_enable_device(struct zpci_dev *zdev)
799{ 819{
800 int rc; 820 int rc;
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 84147984224a..c747394029ee 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -155,9 +155,9 @@ int clp_add_pci_device(u32 fid, u32 fh, int configured)
155 int rc; 155 int rc;
156 156
157 zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured); 157 zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
158 zdev = zpci_alloc_device(); 158 zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
159 if (IS_ERR(zdev)) 159 if (!zdev)
160 return PTR_ERR(zdev); 160 return -ENOMEM;
161 161
162 zdev->fh = fh; 162 zdev->fh = fh;
163 zdev->fid = fid; 163 zdev->fid = fid;
@@ -178,7 +178,7 @@ int clp_add_pci_device(u32 fid, u32 fh, int configured)
178 return 0; 178 return 0;
179 179
180error: 180error:
181 zpci_free_device(zdev); 181 kfree(zdev);
182 return rc; 182 return rc;
183} 183}
184 184
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 278e671ec9ac..800f064b0da7 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -11,6 +11,7 @@
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/pci.h> 12#include <linux/pci.h>
13#include <asm/pci_debug.h> 13#include <asm/pci_debug.h>
14#include <asm/sclp.h>
14 15
15/* Content Code Description for PCI Function Error */ 16/* Content Code Description for PCI Function Error */
16struct zpci_ccdf_err { 17struct zpci_ccdf_err {
@@ -42,10 +43,27 @@ struct zpci_ccdf_avail {
42 u16 pec; /* PCI event code */ 43 u16 pec; /* PCI event code */
43} __packed; 44} __packed;
44 45
45static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf) 46void zpci_event_error(void *data)
46{ 47{
48 struct zpci_ccdf_err *ccdf = data;
49 struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
50
51 zpci_err("error CCDF:\n");
52 zpci_err_hex(ccdf, sizeof(*ccdf));
53
54 if (!zdev)
55 return;
56
57 pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n",
58 pci_name(zdev->pdev), ccdf->pec, ccdf->fid);
59}
60
61void zpci_event_availability(void *data)
62{
63 struct zpci_ccdf_avail *ccdf = data;
47 struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); 64 struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
48 struct pci_dev *pdev = zdev ? zdev->pdev : NULL; 65 struct pci_dev *pdev = zdev ? zdev->pdev : NULL;
66 int ret;
49 67
50 pr_info("%s: Event 0x%x reconfigured PCI function 0x%x\n", 68 pr_info("%s: Event 0x%x reconfigured PCI function 0x%x\n",
51 pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid); 69 pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
@@ -53,36 +71,47 @@ static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf)
53 zpci_err_hex(ccdf, sizeof(*ccdf)); 71 zpci_err_hex(ccdf, sizeof(*ccdf));
54 72
55 switch (ccdf->pec) { 73 switch (ccdf->pec) {
56 case 0x0301: 74 case 0x0301: /* Standby -> Configured */
57 zpci_enable_device(zdev); 75 if (!zdev || zdev->state == ZPCI_FN_STATE_CONFIGURED)
76 break;
77 zdev->state = ZPCI_FN_STATE_CONFIGURED;
78 ret = zpci_enable_device(zdev);
79 if (ret)
80 break;
81 pci_rescan_bus(zdev->bus);
58 break; 82 break;
59 case 0x0302: 83 case 0x0302: /* Reserved -> Standby */
60 clp_add_pci_device(ccdf->fid, ccdf->fh, 0); 84 clp_add_pci_device(ccdf->fid, ccdf->fh, 0);
61 break; 85 break;
62 case 0x0306: 86 case 0x0303: /* Deconfiguration requested */
87 if (pdev)
88 pci_stop_and_remove_bus_device(pdev);
89
90 ret = zpci_disable_device(zdev);
91 if (ret)
92 break;
93
94 ret = sclp_pci_deconfigure(zdev->fid);
95 zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
96 if (!ret)
97 zdev->state = ZPCI_FN_STATE_STANDBY;
98
99 break;
100 case 0x0304: /* Configured -> Standby */
101 if (pdev)
102 pci_stop_and_remove_bus_device(pdev);
103
104 zpci_disable_device(zdev);
105 zdev->state = ZPCI_FN_STATE_STANDBY;
106 break;
107 case 0x0306: /* 0x308 or 0x302 for multiple devices */
63 clp_rescan_pci_devices(); 108 clp_rescan_pci_devices();
64 break; 109 break;
110 case 0x0308: /* Standby -> Reserved */
111 pci_stop_root_bus(zdev->bus);
112 pci_remove_root_bus(zdev->bus);
113 break;
65 default: 114 default:
66 break; 115 break;
67 } 116 }
68} 117}
69
70void zpci_event_error(void *data)
71{
72 struct zpci_ccdf_err *ccdf = data;
73 struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
74
75 zpci_err("error CCDF:\n");
76 zpci_err_hex(ccdf, sizeof(*ccdf));
77
78 if (!zdev)
79 return;
80
81 pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n",
82 pci_name(zdev->pdev), ccdf->pec, ccdf->fid);
83}
84
85void zpci_event_availability(void *data)
86{
87 zpci_event_log_avail(data);
88}
diff --git a/arch/score/include/asm/thread_info.h b/arch/score/include/asm/thread_info.h
index 1425cc034872..656b7ada9326 100644
--- a/arch/score/include/asm/thread_info.h
+++ b/arch/score/include/asm/thread_info.h
@@ -72,8 +72,6 @@ register struct thread_info *__current_thread_info __asm__("r28");
72 72
73#endif /* !__ASSEMBLY__ */ 73#endif /* !__ASSEMBLY__ */
74 74
75#define PREEMPT_ACTIVE 0x10000000
76
77/* 75/*
78 * thread information flags 76 * thread information flags
79 * - these are process state flags that various assembly files may need to 77 * - these are process state flags that various assembly files may need to
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 45a93669289d..ad27ffa65e2e 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -41,8 +41,6 @@ struct thread_info {
41 41
42#endif 42#endif
43 43
44#define PREEMPT_ACTIVE 0x10000000
45
46#if defined(CONFIG_4KSTACKS) 44#if defined(CONFIG_4KSTACKS)
47#define THREAD_SHIFT 12 45#define THREAD_SHIFT 12
48#else 46#else
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 9b6e4beeb296..ca46834294b7 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -108,7 +108,7 @@ need_resched:
108 and #(0xf0>>1), r0 ! interrupts off (exception path)? 108 and #(0xf0>>1), r0 ! interrupts off (exception path)?
109 cmp/eq #(0xf0>>1), r0 109 cmp/eq #(0xf0>>1), r0
110 bt noresched 110 bt noresched
111 mov.l 3f, r0 111 mov.l 1f, r0
112 jsr @r0 ! call preempt_schedule_irq 112 jsr @r0 ! call preempt_schedule_irq
113 nop 113 nop
114 bra need_resched 114 bra need_resched
@@ -119,9 +119,7 @@ noresched:
119 nop 119 nop
120 120
121 .align 2 121 .align 2
1221: .long PREEMPT_ACTIVE 1221: .long preempt_schedule_irq
1232: .long schedule
1243: .long preempt_schedule_irq
125#endif 123#endif
126 124
127ENTRY(resume_userspace) 125ENTRY(resume_userspace)
diff --git a/arch/sparc/include/asm/hardirq_32.h b/arch/sparc/include/asm/hardirq_32.h
index 162007643cdc..ee93923b7f82 100644
--- a/arch/sparc/include/asm/hardirq_32.h
+++ b/arch/sparc/include/asm/hardirq_32.h
@@ -7,7 +7,6 @@
7#ifndef __SPARC_HARDIRQ_H 7#ifndef __SPARC_HARDIRQ_H
8#define __SPARC_HARDIRQ_H 8#define __SPARC_HARDIRQ_H
9 9
10#define HARDIRQ_BITS 8
11#include <asm-generic/hardirq.h> 10#include <asm-generic/hardirq.h>
12 11
13#endif /* __SPARC_HARDIRQ_H */ 12#endif /* __SPARC_HARDIRQ_H */
diff --git a/arch/sparc/include/asm/hardirq_64.h b/arch/sparc/include/asm/hardirq_64.h
index 7c29fd1a87aa..f478ff1ddd02 100644
--- a/arch/sparc/include/asm/hardirq_64.h
+++ b/arch/sparc/include/asm/hardirq_64.h
@@ -14,6 +14,4 @@
14 14
15void ack_bad_irq(unsigned int irq); 15void ack_bad_irq(unsigned int irq);
16 16
17#define HARDIRQ_BITS 8
18
19#endif /* !(__SPARC64_HARDIRQ_H) */ 17#endif /* !(__SPARC64_HARDIRQ_H) */
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index dd3807599bb9..96efa7adc223 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -105,8 +105,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
105#define TI_W_SAVED 0x250 105#define TI_W_SAVED 0x250
106/* #define TI_RESTART_BLOCK 0x25n */ /* Nobody cares */ 106/* #define TI_RESTART_BLOCK 0x25n */ /* Nobody cares */
107 107
108#define PREEMPT_ACTIVE 0x4000000
109
110/* 108/*
111 * thread information flag bit numbers 109 * thread information flag bit numbers
112 */ 110 */
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 5d9292ab1077..a5f01ac6d0f1 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -111,8 +111,6 @@ struct thread_info {
111#define THREAD_SHIFT PAGE_SHIFT 111#define THREAD_SHIFT PAGE_SHIFT
112#endif /* PAGE_SHIFT == 13 */ 112#endif /* PAGE_SHIFT == 13 */
113 113
114#define PREEMPT_ACTIVE 0x10000000
115
116/* 114/*
117 * macros/functions for gaining access to the thread information structure 115 * macros/functions for gaining access to the thread information structure
118 */ 116 */
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
index f0d6a9700f4c..3c3c89f52643 100644
--- a/arch/sparc/include/asm/tlbflush_64.h
+++ b/arch/sparc/include/asm/tlbflush_64.h
@@ -1,7 +1,6 @@
1#ifndef _SPARC64_TLBFLUSH_H 1#ifndef _SPARC64_TLBFLUSH_H
2#define _SPARC64_TLBFLUSH_H 2#define _SPARC64_TLBFLUSH_H
3 3
4#include <linux/mm.h>
5#include <asm/mmu_context.h> 4#include <asm/mmu_context.h>
6 5
7/* TSB flush operations. */ 6/* TSB flush operations. */
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index a954eb81881b..39f0c662f4c8 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -312,12 +312,10 @@ to_kernel:
312 nop 312 nop
313 cmp %l4, 0 313 cmp %l4, 0
314 bne,pn %xcc, kern_fpucheck 314 bne,pn %xcc, kern_fpucheck
315 sethi %hi(PREEMPT_ACTIVE), %l6 315 nop
316 stw %l6, [%g6 + TI_PRE_COUNT] 316 call preempt_schedule_irq
317 call schedule
318 nop 317 nop
319 ba,pt %xcc, rtrap 318 ba,pt %xcc, rtrap
320 stw %g0, [%g6 + TI_PRE_COUNT]
321#endif 319#endif
322kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5 320kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5
323 brz,pt %l5, rt_continue 321 brz,pt %l5, rt_continue
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 6b643790e4fe..5322e530d09c 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2565,8 +2565,6 @@ pgtable_t pte_alloc_one(struct mm_struct *mm,
2565{ 2565{
2566 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | 2566 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
2567 __GFP_REPEAT | __GFP_ZERO); 2567 __GFP_REPEAT | __GFP_ZERO);
2568 pte_t *pte = NULL;
2569
2570 if (!page) 2568 if (!page)
2571 return NULL; 2569 return NULL;
2572 if (!pgtable_page_ctor(page)) { 2570 if (!pgtable_page_ctor(page)) {
diff --git a/arch/tile/include/asm/hardirq.h b/arch/tile/include/asm/hardirq.h
index 822390f9a154..54110af23985 100644
--- a/arch/tile/include/asm/hardirq.h
+++ b/arch/tile/include/asm/hardirq.h
@@ -42,6 +42,4 @@ DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
42 42
43#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ 43#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
44 44
45#define HARDIRQ_BITS 8
46
47#endif /* _ASM_TILE_HARDIRQ_H */ 45#endif /* _ASM_TILE_HARDIRQ_H */
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index b8aa6df3e102..729aa107f64e 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -113,8 +113,6 @@ extern void _cpu_idle(void);
113 113
114#endif /* !__ASSEMBLY__ */ 114#endif /* !__ASSEMBLY__ */
115 115
116#define PREEMPT_ACTIVE 0x10000000
117
118/* 116/*
119 * Thread information flags that various assembly files may need to access. 117 * Thread information flags that various assembly files may need to access.
120 * Keep flags accessed frequently in low bits, particular since it makes 118 * Keep flags accessed frequently in low bits, particular since it makes
diff --git a/arch/um/Kconfig.char b/arch/um/Kconfig.char
index b9d7c4276682..f10738d68b2d 100644
--- a/arch/um/Kconfig.char
+++ b/arch/um/Kconfig.char
@@ -6,10 +6,6 @@ config STDERR_CONSOLE
6 help 6 help
7 console driver which dumps all printk messages to stderr. 7 console driver which dumps all printk messages to stderr.
8 8
9config STDIO_CONSOLE
10 bool
11 default y
12
13config SSL 9config SSL
14 bool "Virtual serial line" 10 bool "Virtual serial line"
15 help 11 help
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index 8ddea1f8006a..21ca44c4f6d5 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -1,8 +1,3 @@
1config DEFCONFIG_LIST
2 string
3 option defconfig_list
4 default "arch/$ARCH/defconfig"
5
6config UML 1config UML
7 bool 2 bool
8 default y 3 default y
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 133f7de2a13d..36e658a4291c 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -6,6 +6,17 @@
6# Licensed under the GPL 6# Licensed under the GPL
7# 7#
8 8
9# select defconfig based on actual architecture
10ifeq ($(SUBARCH),x86)
11 ifeq ($(shell uname -m),x86_64)
12 KBUILD_DEFCONFIG := x86_64_defconfig
13 else
14 KBUILD_DEFCONFIG := i386_defconfig
15 endif
16else
17 KBUILD_DEFCONFIG := $(SUBARCH)_defconfig
18endif
19
9ARCH_DIR := arch/um 20ARCH_DIR := arch/um
10OS := $(shell uname -s) 21OS := $(shell uname -s)
11# We require bash because the vmlinux link and loader script cpp use bash 22# We require bash because the vmlinux link and loader script cpp use bash
@@ -22,12 +33,11 @@ MODE_INCLUDE += -I$(srctree)/$(ARCH_DIR)/include/shared/skas
22 33
23HEADER_ARCH := $(SUBARCH) 34HEADER_ARCH := $(SUBARCH)
24 35
25# Additional ARCH settings for x86 36ifneq ($(filter $(SUBARCH),x86 x86_64 i386),)
26ifeq ($(SUBARCH),i386) 37 HEADER_ARCH := x86
27 HEADER_ARCH := x86
28endif 38endif
29ifeq ($(SUBARCH),x86_64) 39
30 HEADER_ARCH := x86 40ifdef CONFIG_64BIT
31 KBUILD_CFLAGS += -mcmodel=large 41 KBUILD_CFLAGS += -mcmodel=large
32endif 42endif
33 43
diff --git a/arch/um/configs/i386_defconfig b/arch/um/configs/i386_defconfig
new file mode 100644
index 000000000000..a12bf68c9f3a
--- /dev/null
+++ b/arch/um/configs/i386_defconfig
@@ -0,0 +1,76 @@
1CONFIG_3_LEVEL_PGTABLES=y
2# CONFIG_COMPACTION is not set
3CONFIG_BINFMT_MISC=m
4CONFIG_HOSTFS=y
5CONFIG_MAGIC_SYSRQ=y
6CONFIG_KERNEL_STACK_ORDER=1
7CONFIG_SYSVIPC=y
8CONFIG_POSIX_MQUEUE=y
9CONFIG_NO_HZ=y
10CONFIG_HIGH_RES_TIMERS=y
11CONFIG_BSD_PROCESS_ACCT=y
12CONFIG_IKCONFIG=y
13CONFIG_IKCONFIG_PROC=y
14CONFIG_LOG_BUF_SHIFT=14
15CONFIG_CGROUPS=y
16CONFIG_CGROUP_FREEZER=y
17CONFIG_CGROUP_DEVICE=y
18CONFIG_CPUSETS=y
19CONFIG_CGROUP_CPUACCT=y
20CONFIG_RESOURCE_COUNTERS=y
21CONFIG_CGROUP_SCHED=y
22CONFIG_BLK_CGROUP=y
23# CONFIG_PID_NS is not set
24CONFIG_SYSFS_DEPRECATED=y
25CONFIG_CC_OPTIMIZE_FOR_SIZE=y
26CONFIG_SLAB=y
27CONFIG_MODULES=y
28CONFIG_MODULE_UNLOAD=y
29# CONFIG_BLK_DEV_BSG is not set
30CONFIG_IOSCHED_CFQ=m
31CONFIG_SSL=y
32CONFIG_NULL_CHAN=y
33CONFIG_PORT_CHAN=y
34CONFIG_PTY_CHAN=y
35CONFIG_TTY_CHAN=y
36CONFIG_XTERM_CHAN=y
37CONFIG_CON_CHAN="pts"
38CONFIG_SSL_CHAN="pts"
39CONFIG_UML_SOUND=m
40CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
41CONFIG_DEVTMPFS=y
42CONFIG_DEVTMPFS_MOUNT=y
43CONFIG_BLK_DEV_UBD=y
44CONFIG_BLK_DEV_LOOP=m
45CONFIG_BLK_DEV_NBD=m
46CONFIG_DUMMY=m
47CONFIG_TUN=m
48CONFIG_PPP=m
49CONFIG_SLIP=m
50CONFIG_LEGACY_PTY_COUNT=32
51# CONFIG_HW_RANDOM is not set
52CONFIG_UML_RANDOM=y
53CONFIG_NET=y
54CONFIG_PACKET=y
55CONFIG_UNIX=y
56CONFIG_INET=y
57# CONFIG_INET_LRO is not set
58# CONFIG_IPV6 is not set
59CONFIG_UML_NET=y
60CONFIG_UML_NET_ETHERTAP=y
61CONFIG_UML_NET_TUNTAP=y
62CONFIG_UML_NET_SLIP=y
63CONFIG_UML_NET_DAEMON=y
64CONFIG_UML_NET_MCAST=y
65CONFIG_UML_NET_SLIRP=y
66CONFIG_EXT4_FS=y
67CONFIG_REISERFS_FS=y
68CONFIG_QUOTA=y
69CONFIG_AUTOFS4_FS=m
70CONFIG_ISO9660_FS=m
71CONFIG_JOLIET=y
72CONFIG_PROC_KCORE=y
73CONFIG_TMPFS=y
74CONFIG_NLS=y
75CONFIG_DEBUG_INFO=y
76CONFIG_DEBUG_KERNEL=y
diff --git a/arch/um/configs/x86_64_defconfig b/arch/um/configs/x86_64_defconfig
new file mode 100644
index 000000000000..3aab117bd553
--- /dev/null
+++ b/arch/um/configs/x86_64_defconfig
@@ -0,0 +1,75 @@
1# CONFIG_COMPACTION is not set
2CONFIG_BINFMT_MISC=m
3CONFIG_HOSTFS=y
4CONFIG_MAGIC_SYSRQ=y
5CONFIG_SYSVIPC=y
6CONFIG_POSIX_MQUEUE=y
7CONFIG_NO_HZ=y
8CONFIG_HIGH_RES_TIMERS=y
9CONFIG_BSD_PROCESS_ACCT=y
10CONFIG_IKCONFIG=y
11CONFIG_IKCONFIG_PROC=y
12CONFIG_LOG_BUF_SHIFT=14
13CONFIG_CGROUPS=y
14CONFIG_CGROUP_FREEZER=y
15CONFIG_CGROUP_DEVICE=y
16CONFIG_CPUSETS=y
17CONFIG_CGROUP_CPUACCT=y
18CONFIG_RESOURCE_COUNTERS=y
19CONFIG_CGROUP_SCHED=y
20CONFIG_BLK_CGROUP=y
21# CONFIG_PID_NS is not set
22CONFIG_SYSFS_DEPRECATED=y
23CONFIG_CC_OPTIMIZE_FOR_SIZE=y
24CONFIG_SLAB=y
25CONFIG_MODULES=y
26CONFIG_MODULE_UNLOAD=y
27# CONFIG_BLK_DEV_BSG is not set
28CONFIG_IOSCHED_CFQ=m
29CONFIG_SSL=y
30CONFIG_NULL_CHAN=y
31CONFIG_PORT_CHAN=y
32CONFIG_PTY_CHAN=y
33CONFIG_TTY_CHAN=y
34CONFIG_XTERM_CHAN=y
35CONFIG_CON_CHAN="pts"
36CONFIG_SSL_CHAN="pts"
37CONFIG_UML_SOUND=m
38CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
39CONFIG_DEVTMPFS=y
40CONFIG_DEVTMPFS_MOUNT=y
41CONFIG_BLK_DEV_UBD=y
42CONFIG_BLK_DEV_LOOP=m
43CONFIG_BLK_DEV_NBD=m
44CONFIG_DUMMY=m
45CONFIG_TUN=m
46CONFIG_PPP=m
47CONFIG_SLIP=m
48CONFIG_LEGACY_PTY_COUNT=32
49# CONFIG_HW_RANDOM is not set
50CONFIG_UML_RANDOM=y
51CONFIG_NET=y
52CONFIG_PACKET=y
53CONFIG_UNIX=y
54CONFIG_INET=y
55# CONFIG_INET_LRO is not set
56# CONFIG_IPV6 is not set
57CONFIG_UML_NET=y
58CONFIG_UML_NET_ETHERTAP=y
59CONFIG_UML_NET_TUNTAP=y
60CONFIG_UML_NET_SLIP=y
61CONFIG_UML_NET_DAEMON=y
62CONFIG_UML_NET_MCAST=y
63CONFIG_UML_NET_SLIRP=y
64CONFIG_EXT4_FS=y
65CONFIG_REISERFS_FS=y
66CONFIG_QUOTA=y
67CONFIG_AUTOFS4_FS=m
68CONFIG_ISO9660_FS=m
69CONFIG_JOLIET=y
70CONFIG_PROC_KCORE=y
71CONFIG_TMPFS=y
72CONFIG_NLS=y
73CONFIG_DEBUG_INFO=y
74CONFIG_FRAME_WARN=1024
75CONFIG_DEBUG_KERNEL=y
diff --git a/arch/um/defconfig b/arch/um/defconfig
deleted file mode 100644
index 2665e6b683f5..000000000000
--- a/arch/um/defconfig
+++ /dev/null
@@ -1,899 +0,0 @@
1#
2# Automatically generated file; DO NOT EDIT.
3# User Mode Linux/i386 3.3.0 Kernel Configuration
4#
5CONFIG_DEFCONFIG_LIST="arch/$ARCH/defconfig"
6CONFIG_UML=y
7CONFIG_MMU=y
8CONFIG_NO_IOMEM=y
9# CONFIG_TRACE_IRQFLAGS_SUPPORT is not set
10CONFIG_LOCKDEP_SUPPORT=y
11# CONFIG_STACKTRACE_SUPPORT is not set
12CONFIG_GENERIC_CALIBRATE_DELAY=y
13CONFIG_GENERIC_BUG=y
14CONFIG_GENERIC_CLOCKEVENTS=y
15CONFIG_HZ=100
16
17#
18# UML-specific options
19#
20
21#
22# Host processor type and features
23#
24# CONFIG_M486 is not set
25# CONFIG_M586 is not set
26# CONFIG_M586TSC is not set
27# CONFIG_M586MMX is not set
28CONFIG_M686=y
29# CONFIG_MPENTIUMII is not set
30# CONFIG_MPENTIUMIII is not set
31# CONFIG_MPENTIUMM is not set
32# CONFIG_MPENTIUM4 is not set
33# CONFIG_MK6 is not set
34# CONFIG_MK7 is not set
35# CONFIG_MK8 is not set
36# CONFIG_MCRUSOE is not set
37# CONFIG_MEFFICEON is not set
38# CONFIG_MWINCHIPC6 is not set
39# CONFIG_MWINCHIP3D is not set
40# CONFIG_MELAN is not set
41# CONFIG_MGEODEGX1 is not set
42# CONFIG_MGEODE_LX is not set
43# CONFIG_MCYRIXIII is not set
44# CONFIG_MVIAC3_2 is not set
45# CONFIG_MVIAC7 is not set
46# CONFIG_MCORE2 is not set
47# CONFIG_MATOM is not set
48# CONFIG_X86_GENERIC is not set
49CONFIG_X86_INTERNODE_CACHE_SHIFT=5
50CONFIG_X86_CMPXCHG=y
51CONFIG_X86_L1_CACHE_SHIFT=5
52CONFIG_X86_XADD=y
53CONFIG_X86_PPRO_FENCE=y
54CONFIG_X86_WP_WORKS_OK=y
55CONFIG_X86_INVLPG=y
56CONFIG_X86_BSWAP=y
57CONFIG_X86_POPAD_OK=y
58CONFIG_X86_USE_PPRO_CHECKSUM=y
59CONFIG_X86_TSC=y
60CONFIG_X86_CMPXCHG64=y
61CONFIG_X86_CMOV=y
62CONFIG_X86_MINIMUM_CPU_FAMILY=5
63CONFIG_CPU_SUP_INTEL=y
64CONFIG_CPU_SUP_CYRIX_32=y
65CONFIG_CPU_SUP_AMD=y
66CONFIG_CPU_SUP_CENTAUR=y
67CONFIG_CPU_SUP_TRANSMETA_32=y
68CONFIG_CPU_SUP_UMC_32=y
69CONFIG_UML_X86=y
70# CONFIG_64BIT is not set
71CONFIG_X86_32=y
72# CONFIG_X86_64 is not set
73# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
74CONFIG_RWSEM_GENERIC_SPINLOCK=y
75# CONFIG_3_LEVEL_PGTABLES is not set
76CONFIG_ARCH_HAS_SC_SIGNALS=y
77CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA=y
78CONFIG_GENERIC_HWEIGHT=y
79# CONFIG_STATIC_LINK is not set
80CONFIG_SELECT_MEMORY_MODEL=y
81CONFIG_FLATMEM_MANUAL=y
82CONFIG_FLATMEM=y
83CONFIG_FLAT_NODE_MEM_MAP=y
84CONFIG_PAGEFLAGS_EXTENDED=y
85CONFIG_SPLIT_PTLOCK_CPUS=4
86# CONFIG_COMPACTION is not set
87# CONFIG_PHYS_ADDR_T_64BIT is not set
88CONFIG_ZONE_DMA_FLAG=0
89CONFIG_VIRT_TO_BUS=y
90# CONFIG_KSM is not set
91CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
92CONFIG_NEED_PER_CPU_KM=y
93# CONFIG_CLEANCACHE is not set
94CONFIG_TICK_ONESHOT=y
95CONFIG_NO_HZ=y
96CONFIG_HIGH_RES_TIMERS=y
97CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
98CONFIG_LD_SCRIPT_DYN=y
99CONFIG_BINFMT_ELF=y
100CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
101CONFIG_HAVE_AOUT=y
102# CONFIG_BINFMT_AOUT is not set
103CONFIG_BINFMT_MISC=m
104CONFIG_HOSTFS=y
105# CONFIG_HPPFS is not set
106CONFIG_MCONSOLE=y
107CONFIG_MAGIC_SYSRQ=y
108CONFIG_KERNEL_STACK_ORDER=0
109# CONFIG_MMAPPER is not set
110CONFIG_NO_DMA=y
111
112#
113# General setup
114#
115CONFIG_EXPERIMENTAL=y
116CONFIG_BROKEN_ON_SMP=y
117CONFIG_INIT_ENV_ARG_LIMIT=128
118CONFIG_CROSS_COMPILE=""
119CONFIG_LOCALVERSION=""
120CONFIG_LOCALVERSION_AUTO=y
121CONFIG_DEFAULT_HOSTNAME="(none)"
122CONFIG_SWAP=y
123CONFIG_SYSVIPC=y
124CONFIG_SYSVIPC_SYSCTL=y
125CONFIG_POSIX_MQUEUE=y
126CONFIG_POSIX_MQUEUE_SYSCTL=y
127CONFIG_BSD_PROCESS_ACCT=y
128# CONFIG_BSD_PROCESS_ACCT_V3 is not set
129# CONFIG_FHANDLE is not set
130# CONFIG_TASKSTATS is not set
131# CONFIG_AUDIT is not set
132
133#
134# IRQ subsystem
135#
136CONFIG_GENERIC_IRQ_SHOW=y
137
138#
139# RCU Subsystem
140#
141CONFIG_TINY_RCU=y
142# CONFIG_PREEMPT_RCU is not set
143# CONFIG_RCU_TRACE is not set
144# CONFIG_TREE_RCU_TRACE is not set
145CONFIG_IKCONFIG=y
146CONFIG_IKCONFIG_PROC=y
147CONFIG_LOG_BUF_SHIFT=14
148CONFIG_CGROUPS=y
149# CONFIG_CGROUP_DEBUG is not set
150CONFIG_CGROUP_FREEZER=y
151CONFIG_CGROUP_DEVICE=y
152CONFIG_CPUSETS=y
153CONFIG_PROC_PID_CPUSET=y
154CONFIG_CGROUP_CPUACCT=y
155CONFIG_RESOURCE_COUNTERS=y
156CONFIG_CGROUP_MEMCG=y
157CONFIG_CGROUP_MEMCG_SWAP=y
158# CONFIG_CGROUP_MEMCG_SWAP_ENABLED is not set
159# CONFIG_CGROUP_MEMCG_KMEM is not set
160CONFIG_CGROUP_SCHED=y
161CONFIG_FAIR_GROUP_SCHED=y
162# CONFIG_CFS_BANDWIDTH is not set
163# CONFIG_RT_GROUP_SCHED is not set
164CONFIG_BLK_CGROUP=y
165# CONFIG_DEBUG_BLK_CGROUP is not set
166# CONFIG_CHECKPOINT_RESTORE is not set
167CONFIG_NAMESPACES=y
168CONFIG_UTS_NS=y
169CONFIG_IPC_NS=y
170# CONFIG_USER_NS is not set
171# CONFIG_PID_NS is not set
172CONFIG_NET_NS=y
173# CONFIG_SCHED_AUTOGROUP is not set
174CONFIG_MM_OWNER=y
175CONFIG_SYSFS_DEPRECATED=y
176# CONFIG_SYSFS_DEPRECATED_V2 is not set
177# CONFIG_RELAY is not set
178# CONFIG_BLK_DEV_INITRD is not set
179CONFIG_CC_OPTIMIZE_FOR_SIZE=y
180CONFIG_SYSCTL=y
181CONFIG_ANON_INODES=y
182# CONFIG_EXPERT is not set
183CONFIG_UID16=y
184# CONFIG_SYSCTL_SYSCALL is not set
185CONFIG_KALLSYMS=y
186# CONFIG_KALLSYMS_ALL is not set
187CONFIG_HOTPLUG=y
188CONFIG_PRINTK=y
189CONFIG_BUG=y
190CONFIG_ELF_CORE=y
191CONFIG_BASE_FULL=y
192CONFIG_FUTEX=y
193CONFIG_EPOLL=y
194CONFIG_SIGNALFD=y
195CONFIG_TIMERFD=y
196CONFIG_EVENTFD=y
197CONFIG_SHMEM=y
198CONFIG_AIO=y
199# CONFIG_EMBEDDED is not set
200
201#
202# Kernel Performance Events And Counters
203#
204CONFIG_VM_EVENT_COUNTERS=y
205CONFIG_COMPAT_BRK=y
206CONFIG_SLAB=y
207# CONFIG_SLUB is not set
208# CONFIG_PROFILING is not set
209
210#
211# GCOV-based kernel profiling
212#
213# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
214CONFIG_SLABINFO=y
215CONFIG_RT_MUTEXES=y
216CONFIG_BASE_SMALL=0
217CONFIG_MODULES=y
218# CONFIG_MODULE_FORCE_LOAD is not set
219CONFIG_MODULE_UNLOAD=y
220# CONFIG_MODULE_FORCE_UNLOAD is not set
221# CONFIG_MODVERSIONS is not set
222# CONFIG_MODULE_SRCVERSION_ALL is not set
223CONFIG_BLOCK=y
224CONFIG_LBDAF=y
225# CONFIG_BLK_DEV_BSG is not set
226# CONFIG_BLK_DEV_BSGLIB is not set
227# CONFIG_BLK_DEV_INTEGRITY is not set
228
229#
230# Partition Types
231#
232# CONFIG_PARTITION_ADVANCED is not set
233CONFIG_MSDOS_PARTITION=y
234
235#
236# IO Schedulers
237#
238CONFIG_IOSCHED_NOOP=y
239CONFIG_IOSCHED_DEADLINE=y
240CONFIG_IOSCHED_CFQ=m
241# CONFIG_CFQ_GROUP_IOSCHED is not set
242CONFIG_DEFAULT_DEADLINE=y
243# CONFIG_DEFAULT_CFQ is not set
244# CONFIG_DEFAULT_NOOP is not set
245CONFIG_DEFAULT_IOSCHED="deadline"
246# CONFIG_INLINE_SPIN_TRYLOCK is not set
247# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
248# CONFIG_INLINE_SPIN_LOCK is not set
249# CONFIG_INLINE_SPIN_LOCK_BH is not set
250# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
251# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
252CONFIG_INLINE_SPIN_UNLOCK=y
253# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
254CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
255# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
256# CONFIG_INLINE_READ_TRYLOCK is not set
257# CONFIG_INLINE_READ_LOCK is not set
258# CONFIG_INLINE_READ_LOCK_BH is not set
259# CONFIG_INLINE_READ_LOCK_IRQ is not set
260# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
261CONFIG_INLINE_READ_UNLOCK=y
262# CONFIG_INLINE_READ_UNLOCK_BH is not set
263CONFIG_INLINE_READ_UNLOCK_IRQ=y
264# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
265# CONFIG_INLINE_WRITE_TRYLOCK is not set
266# CONFIG_INLINE_WRITE_LOCK is not set
267# CONFIG_INLINE_WRITE_LOCK_BH is not set
268# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
269# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
270CONFIG_INLINE_WRITE_UNLOCK=y
271# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
272CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
273# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
274# CONFIG_MUTEX_SPIN_ON_OWNER is not set
275CONFIG_FREEZER=y
276
277#
278# UML Character Devices
279#
280CONFIG_STDERR_CONSOLE=y
281CONFIG_STDIO_CONSOLE=y
282CONFIG_SSL=y
283CONFIG_NULL_CHAN=y
284CONFIG_PORT_CHAN=y
285CONFIG_PTY_CHAN=y
286CONFIG_TTY_CHAN=y
287CONFIG_XTERM_CHAN=y
288# CONFIG_NOCONFIG_CHAN is not set
289CONFIG_CON_ZERO_CHAN="fd:0,fd:1"
290CONFIG_CON_CHAN="xterm"
291CONFIG_SSL_CHAN="pts"
292CONFIG_UML_SOUND=m
293CONFIG_SOUND=m
294CONFIG_SOUND_OSS_CORE=y
295CONFIG_HOSTAUDIO=m
296
297#
298# Device Drivers
299#
300
301#
302# Generic Driver Options
303#
304CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
305CONFIG_DEVTMPFS=y
306CONFIG_DEVTMPFS_MOUNT=y
307CONFIG_STANDALONE=y
308CONFIG_PREVENT_FIRMWARE_BUILD=y
309CONFIG_FW_LOADER=y
310CONFIG_FIRMWARE_IN_KERNEL=y
311CONFIG_EXTRA_FIRMWARE=""
312# CONFIG_DEBUG_DRIVER is not set
313# CONFIG_DEBUG_DEVRES is not set
314# CONFIG_SYS_HYPERVISOR is not set
315CONFIG_GENERIC_CPU_DEVICES=y
316# CONFIG_DMA_SHARED_BUFFER is not set
317# CONFIG_CONNECTOR is not set
318# CONFIG_MTD is not set
319CONFIG_BLK_DEV=y
320CONFIG_BLK_DEV_UBD=y
321# CONFIG_BLK_DEV_UBD_SYNC is not set
322CONFIG_BLK_DEV_COW_COMMON=y
323CONFIG_BLK_DEV_LOOP=m
324CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
325# CONFIG_BLK_DEV_CRYPTOLOOP is not set
326
327#
328# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
329#
330CONFIG_BLK_DEV_NBD=m
331# CONFIG_BLK_DEV_RAM is not set
332# CONFIG_ATA_OVER_ETH is not set
333# CONFIG_BLK_DEV_RBD is not set
334
335#
336# Misc devices
337#
338# CONFIG_ENCLOSURE_SERVICES is not set
339# CONFIG_C2PORT is not set
340
341#
342# EEPROM support
343#
344# CONFIG_EEPROM_93CX6 is not set
345
346#
347# Texas Instruments shared transport line discipline
348#
349
350#
351# Altera FPGA firmware download module
352#
353
354#
355# SCSI device support
356#
357CONFIG_SCSI_MOD=y
358# CONFIG_RAID_ATTRS is not set
359# CONFIG_SCSI is not set
360# CONFIG_SCSI_DMA is not set
361# CONFIG_SCSI_NETLINK is not set
362# CONFIG_MD is not set
363CONFIG_NETDEVICES=y
364CONFIG_NET_CORE=y
365# CONFIG_BONDING is not set
366CONFIG_DUMMY=m
367# CONFIG_EQUALIZER is not set
368# CONFIG_MII is not set
369# CONFIG_NET_TEAM is not set
370# CONFIG_MACVLAN is not set
371# CONFIG_NETCONSOLE is not set
372# CONFIG_NETPOLL is not set
373# CONFIG_NET_POLL_CONTROLLER is not set
374CONFIG_TUN=m
375# CONFIG_VETH is not set
376
377#
378# CAIF transport drivers
379#
380CONFIG_ETHERNET=y
381CONFIG_NET_VENDOR_CHELSIO=y
382CONFIG_NET_VENDOR_INTEL=y
383CONFIG_NET_VENDOR_I825XX=y
384CONFIG_NET_VENDOR_MARVELL=y
385CONFIG_NET_VENDOR_NATSEMI=y
386CONFIG_NET_VENDOR_8390=y
387# CONFIG_PHYLIB is not set
388CONFIG_PPP=m
389# CONFIG_PPP_BSDCOMP is not set
390# CONFIG_PPP_DEFLATE is not set
391# CONFIG_PPP_FILTER is not set
392# CONFIG_PPP_MPPE is not set
393# CONFIG_PPP_MULTILINK is not set
394# CONFIG_PPPOE is not set
395# CONFIG_PPP_ASYNC is not set
396# CONFIG_PPP_SYNC_TTY is not set
397CONFIG_SLIP=m
398CONFIG_SLHC=m
399# CONFIG_SLIP_COMPRESSED is not set
400# CONFIG_SLIP_SMART is not set
401# CONFIG_SLIP_MODE_SLIP6 is not set
402CONFIG_WLAN=y
403# CONFIG_HOSTAP is not set
404
405#
406# Enable WiMAX (Networking options) to see the WiMAX drivers
407#
408# CONFIG_WAN is not set
409
410#
411# Character devices
412#
413CONFIG_UNIX98_PTYS=y
414# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
415CONFIG_LEGACY_PTYS=y
416CONFIG_LEGACY_PTY_COUNT=32
417# CONFIG_N_GSM is not set
418# CONFIG_TRACE_SINK is not set
419CONFIG_DEVKMEM=y
420# CONFIG_HW_RANDOM is not set
421CONFIG_UML_RANDOM=y
422# CONFIG_R3964 is not set
423# CONFIG_NSC_GPIO is not set
424# CONFIG_RAW_DRIVER is not set
425
426#
427# PPS support
428#
429# CONFIG_PPS is not set
430
431#
432# PPS generators support
433#
434
435#
436# PTP clock support
437#
438
439#
440# Enable Device Drivers -> PPS to see the PTP clock options.
441#
442# CONFIG_POWER_SUPPLY is not set
443# CONFIG_THERMAL is not set
444# CONFIG_WATCHDOG is not set
445# CONFIG_REGULATOR is not set
446CONFIG_SOUND_OSS_CORE_PRECLAIM=y
447# CONFIG_MEMSTICK is not set
448# CONFIG_NEW_LEDS is not set
449# CONFIG_ACCESSIBILITY is not set
450# CONFIG_AUXDISPLAY is not set
451# CONFIG_UIO is not set
452
453#
454# Virtio drivers
455#
456# CONFIG_VIRTIO_BALLOON is not set
457
458#
459# Microsoft Hyper-V guest support
460#
461# CONFIG_STAGING is not set
462
463#
464# Hardware Spinlock drivers
465#
466CONFIG_IOMMU_SUPPORT=y
467# CONFIG_VIRT_DRIVERS is not set
468# CONFIG_PM_DEVFREQ is not set
469CONFIG_NET=y
470
471#
472# Networking options
473#
474CONFIG_PACKET=y
475CONFIG_UNIX=y
476# CONFIG_UNIX_DIAG is not set
477CONFIG_XFRM=y
478# CONFIG_XFRM_USER is not set
479# CONFIG_XFRM_SUB_POLICY is not set
480# CONFIG_XFRM_MIGRATE is not set
481# CONFIG_XFRM_STATISTICS is not set
482# CONFIG_NET_KEY is not set
483CONFIG_INET=y
484# CONFIG_IP_MULTICAST is not set
485# CONFIG_IP_ADVANCED_ROUTER is not set
486# CONFIG_IP_PNP is not set
487# CONFIG_NET_IPIP is not set
488# CONFIG_NET_IPGRE_DEMUX is not set
489# CONFIG_ARPD is not set
490# CONFIG_SYN_COOKIES is not set
491# CONFIG_INET_AH is not set
492# CONFIG_INET_ESP is not set
493# CONFIG_INET_IPCOMP is not set
494# CONFIG_INET_XFRM_TUNNEL is not set
495# CONFIG_INET_TUNNEL is not set
496CONFIG_INET_XFRM_MODE_TRANSPORT=y
497CONFIG_INET_XFRM_MODE_TUNNEL=y
498CONFIG_INET_XFRM_MODE_BEET=y
499# CONFIG_INET_LRO is not set
500CONFIG_INET_DIAG=y
501CONFIG_INET_TCP_DIAG=y
502# CONFIG_INET_UDP_DIAG is not set
503# CONFIG_TCP_CONG_ADVANCED is not set
504CONFIG_TCP_CONG_CUBIC=y
505CONFIG_DEFAULT_TCP_CONG="cubic"
506# CONFIG_TCP_MD5SIG is not set
507# CONFIG_IPV6 is not set
508# CONFIG_NETWORK_SECMARK is not set
509# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
510# CONFIG_NETFILTER is not set
511# CONFIG_IP_DCCP is not set
512# CONFIG_IP_SCTP is not set
513# CONFIG_RDS is not set
514# CONFIG_TIPC is not set
515# CONFIG_ATM is not set
516# CONFIG_L2TP is not set
517# CONFIG_BRIDGE is not set
518# CONFIG_NET_DSA is not set
519# CONFIG_VLAN_8021Q is not set
520# CONFIG_DECNET is not set
521# CONFIG_LLC2 is not set
522# CONFIG_IPX is not set
523# CONFIG_ATALK is not set
524# CONFIG_X25 is not set
525# CONFIG_LAPB is not set
526# CONFIG_ECONET is not set
527# CONFIG_WAN_ROUTER is not set
528# CONFIG_PHONET is not set
529# CONFIG_IEEE802154 is not set
530# CONFIG_NET_SCHED is not set
531# CONFIG_DCB is not set
532# CONFIG_BATMAN_ADV is not set
533# CONFIG_OPENVSWITCH is not set
534# CONFIG_NETPRIO_CGROUP is not set
535CONFIG_BQL=y
536
537#
538# Network testing
539#
540# CONFIG_NET_PKTGEN is not set
541# CONFIG_HAMRADIO is not set
542# CONFIG_CAN is not set
543# CONFIG_IRDA is not set
544# CONFIG_BT is not set
545# CONFIG_AF_RXRPC is not set
546CONFIG_WIRELESS=y
547# CONFIG_CFG80211 is not set
548# CONFIG_LIB80211 is not set
549
550#
551# CFG80211 needs to be enabled for MAC80211
552#
553# CONFIG_WIMAX is not set
554# CONFIG_RFKILL is not set
555# CONFIG_NET_9P is not set
556# CONFIG_CAIF is not set
557# CONFIG_CEPH_LIB is not set
558# CONFIG_NFC is not set
559
560#
561# UML Network Devices
562#
563CONFIG_UML_NET=y
564CONFIG_UML_NET_ETHERTAP=y
565CONFIG_UML_NET_TUNTAP=y
566CONFIG_UML_NET_SLIP=y
567CONFIG_UML_NET_DAEMON=y
568# CONFIG_UML_NET_VDE is not set
569CONFIG_UML_NET_MCAST=y
570# CONFIG_UML_NET_PCAP is not set
571CONFIG_UML_NET_SLIRP=y
572
573#
574# File systems
575#
576# CONFIG_EXT2_FS is not set
577# CONFIG_EXT3_FS is not set
578CONFIG_EXT4_FS=y
579CONFIG_EXT4_USE_FOR_EXT23=y
580CONFIG_EXT4_FS_XATTR=y
581# CONFIG_EXT4_FS_POSIX_ACL is not set
582# CONFIG_EXT4_FS_SECURITY is not set
583# CONFIG_EXT4_DEBUG is not set
584CONFIG_JBD2=y
585CONFIG_FS_MBCACHE=y
586CONFIG_REISERFS_FS=y
587# CONFIG_REISERFS_CHECK is not set
588# CONFIG_REISERFS_PROC_INFO is not set
589# CONFIG_REISERFS_FS_XATTR is not set
590# CONFIG_JFS_FS is not set
591# CONFIG_XFS_FS is not set
592# CONFIG_GFS2_FS is not set
593# CONFIG_BTRFS_FS is not set
594# CONFIG_NILFS2_FS is not set
595# CONFIG_FS_POSIX_ACL is not set
596CONFIG_FILE_LOCKING=y
597CONFIG_FSNOTIFY=y
598CONFIG_DNOTIFY=y
599CONFIG_INOTIFY_USER=y
600# CONFIG_FANOTIFY is not set
601CONFIG_QUOTA=y
602# CONFIG_QUOTA_NETLINK_INTERFACE is not set
603CONFIG_PRINT_QUOTA_WARNING=y
604# CONFIG_QUOTA_DEBUG is not set
605# CONFIG_QFMT_V1 is not set
606# CONFIG_QFMT_V2 is not set
607CONFIG_QUOTACTL=y
608CONFIG_AUTOFS4_FS=m
609# CONFIG_FUSE_FS is not set
610
611#
612# Caches
613#
614# CONFIG_FSCACHE is not set
615
616#
617# CD-ROM/DVD Filesystems
618#
619CONFIG_ISO9660_FS=m
620CONFIG_JOLIET=y
621# CONFIG_ZISOFS is not set
622# CONFIG_UDF_FS is not set
623
624#
625# DOS/FAT/NT Filesystems
626#
627# CONFIG_MSDOS_FS is not set
628# CONFIG_VFAT_FS is not set
629# CONFIG_NTFS_FS is not set
630
631#
632# Pseudo filesystems
633#
634CONFIG_PROC_FS=y
635CONFIG_PROC_KCORE=y
636CONFIG_PROC_SYSCTL=y
637CONFIG_PROC_PAGE_MONITOR=y
638CONFIG_SYSFS=y
639CONFIG_TMPFS=y
640# CONFIG_TMPFS_POSIX_ACL is not set
641# CONFIG_TMPFS_XATTR is not set
642# CONFIG_HUGETLB_PAGE is not set
643# CONFIG_CONFIGFS_FS is not set
644CONFIG_MISC_FILESYSTEMS=y
645# CONFIG_ADFS_FS is not set
646# CONFIG_AFFS_FS is not set
647# CONFIG_HFS_FS is not set
648# CONFIG_HFSPLUS_FS is not set
649# CONFIG_BEFS_FS is not set
650# CONFIG_BFS_FS is not set
651# CONFIG_EFS_FS is not set
652# CONFIG_LOGFS is not set
653# CONFIG_CRAMFS is not set
654# CONFIG_SQUASHFS is not set
655# CONFIG_VXFS_FS is not set
656# CONFIG_MINIX_FS is not set
657# CONFIG_OMFS_FS is not set
658# CONFIG_HPFS_FS is not set
659# CONFIG_QNX4FS_FS is not set
660# CONFIG_ROMFS_FS is not set
661# CONFIG_PSTORE is not set
662# CONFIG_SYSV_FS is not set
663# CONFIG_UFS_FS is not set
664CONFIG_NETWORK_FILESYSTEMS=y
665# CONFIG_NFS_FS is not set
666# CONFIG_NFSD is not set
667# CONFIG_CEPH_FS is not set
668# CONFIG_CIFS is not set
669# CONFIG_NCP_FS is not set
670# CONFIG_CODA_FS is not set
671# CONFIG_AFS_FS is not set
672CONFIG_NLS=y
673CONFIG_NLS_DEFAULT="iso8859-1"
674# CONFIG_NLS_CODEPAGE_437 is not set
675# CONFIG_NLS_CODEPAGE_737 is not set
676# CONFIG_NLS_CODEPAGE_775 is not set
677# CONFIG_NLS_CODEPAGE_850 is not set
678# CONFIG_NLS_CODEPAGE_852 is not set
679# CONFIG_NLS_CODEPAGE_855 is not set
680# CONFIG_NLS_CODEPAGE_857 is not set
681# CONFIG_NLS_CODEPAGE_860 is not set
682# CONFIG_NLS_CODEPAGE_861 is not set
683# CONFIG_NLS_CODEPAGE_862 is not set
684# CONFIG_NLS_CODEPAGE_863 is not set
685# CONFIG_NLS_CODEPAGE_864 is not set
686# CONFIG_NLS_CODEPAGE_865 is not set
687# CONFIG_NLS_CODEPAGE_866 is not set
688# CONFIG_NLS_CODEPAGE_869 is not set
689# CONFIG_NLS_CODEPAGE_936 is not set
690# CONFIG_NLS_CODEPAGE_950 is not set
691# CONFIG_NLS_CODEPAGE_932 is not set
692# CONFIG_NLS_CODEPAGE_949 is not set
693# CONFIG_NLS_CODEPAGE_874 is not set
694# CONFIG_NLS_ISO8859_8 is not set
695# CONFIG_NLS_CODEPAGE_1250 is not set
696# CONFIG_NLS_CODEPAGE_1251 is not set
697# CONFIG_NLS_ASCII is not set
698# CONFIG_NLS_ISO8859_1 is not set
699# CONFIG_NLS_ISO8859_2 is not set
700# CONFIG_NLS_ISO8859_3 is not set
701# CONFIG_NLS_ISO8859_4 is not set
702# CONFIG_NLS_ISO8859_5 is not set
703# CONFIG_NLS_ISO8859_6 is not set
704# CONFIG_NLS_ISO8859_7 is not set
705# CONFIG_NLS_ISO8859_9 is not set
706# CONFIG_NLS_ISO8859_13 is not set
707# CONFIG_NLS_ISO8859_14 is not set
708# CONFIG_NLS_ISO8859_15 is not set
709# CONFIG_NLS_KOI8_R is not set
710# CONFIG_NLS_KOI8_U is not set
711# CONFIG_NLS_UTF8 is not set
712
713#
714# Security options
715#
716# CONFIG_KEYS is not set
717# CONFIG_SECURITY_DMESG_RESTRICT is not set
718# CONFIG_SECURITY is not set
719# CONFIG_SECURITYFS is not set
720CONFIG_DEFAULT_SECURITY_DAC=y
721CONFIG_DEFAULT_SECURITY=""
722CONFIG_CRYPTO=y
723
724#
725# Crypto core or helper
726#
727# CONFIG_CRYPTO_FIPS is not set
728CONFIG_CRYPTO_ALGAPI=m
729CONFIG_CRYPTO_ALGAPI2=m
730CONFIG_CRYPTO_RNG=m
731CONFIG_CRYPTO_RNG2=m
732# CONFIG_CRYPTO_MANAGER is not set
733# CONFIG_CRYPTO_MANAGER2 is not set
734# CONFIG_CRYPTO_USER is not set
735# CONFIG_CRYPTO_GF128MUL is not set
736# CONFIG_CRYPTO_NULL is not set
737# CONFIG_CRYPTO_CRYPTD is not set
738# CONFIG_CRYPTO_AUTHENC is not set
739# CONFIG_CRYPTO_TEST is not set
740
741#
742# Authenticated Encryption with Associated Data
743#
744# CONFIG_CRYPTO_CCM is not set
745# CONFIG_CRYPTO_GCM is not set
746# CONFIG_CRYPTO_SEQIV is not set
747
748#
749# Block modes
750#
751# CONFIG_CRYPTO_CBC is not set
752# CONFIG_CRYPTO_CTR is not set
753# CONFIG_CRYPTO_CTS is not set
754# CONFIG_CRYPTO_ECB is not set
755# CONFIG_CRYPTO_LRW is not set
756# CONFIG_CRYPTO_PCBC is not set
757# CONFIG_CRYPTO_XTS is not set
758
759#
760# Hash modes
761#
762# CONFIG_CRYPTO_HMAC is not set
763# CONFIG_CRYPTO_XCBC is not set
764# CONFIG_CRYPTO_VMAC is not set
765
766#
767# Digest
768#
769# CONFIG_CRYPTO_CRC32C is not set
770# CONFIG_CRYPTO_GHASH is not set
771# CONFIG_CRYPTO_MD4 is not set
772# CONFIG_CRYPTO_MD5 is not set
773# CONFIG_CRYPTO_MICHAEL_MIC is not set
774# CONFIG_CRYPTO_RMD128 is not set
775# CONFIG_CRYPTO_RMD160 is not set
776# CONFIG_CRYPTO_RMD256 is not set
777# CONFIG_CRYPTO_RMD320 is not set
778# CONFIG_CRYPTO_SHA1 is not set
779# CONFIG_CRYPTO_SHA256 is not set
780# CONFIG_CRYPTO_SHA512 is not set
781# CONFIG_CRYPTO_TGR192 is not set
782# CONFIG_CRYPTO_WP512 is not set
783
784#
785# Ciphers
786#
787CONFIG_CRYPTO_AES=m
788# CONFIG_CRYPTO_AES_586 is not set
789# CONFIG_CRYPTO_ANUBIS is not set
790# CONFIG_CRYPTO_ARC4 is not set
791# CONFIG_CRYPTO_BLOWFISH is not set
792# CONFIG_CRYPTO_CAMELLIA is not set
793# CONFIG_CRYPTO_CAST5 is not set
794# CONFIG_CRYPTO_CAST6 is not set
795# CONFIG_CRYPTO_DES is not set
796# CONFIG_CRYPTO_FCRYPT is not set
797# CONFIG_CRYPTO_KHAZAD is not set
798# CONFIG_CRYPTO_SALSA20 is not set
799# CONFIG_CRYPTO_SALSA20_586 is not set
800# CONFIG_CRYPTO_SEED is not set
801# CONFIG_CRYPTO_SERPENT is not set
802# CONFIG_CRYPTO_TEA is not set
803# CONFIG_CRYPTO_TWOFISH is not set
804# CONFIG_CRYPTO_TWOFISH_586 is not set
805
806#
807# Compression
808#
809# CONFIG_CRYPTO_DEFLATE is not set
810# CONFIG_CRYPTO_ZLIB is not set
811# CONFIG_CRYPTO_LZO is not set
812
813#
814# Random Number Generation
815#
816CONFIG_CRYPTO_ANSI_CPRNG=m
817# CONFIG_CRYPTO_USER_API_HASH is not set
818# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
819CONFIG_CRYPTO_HW=y
820# CONFIG_BINARY_PRINTF is not set
821
822#
823# Library routines
824#
825CONFIG_BITREVERSE=y
826CONFIG_GENERIC_FIND_FIRST_BIT=y
827CONFIG_GENERIC_IO=y
828# CONFIG_CRC_CCITT is not set
829CONFIG_CRC16=y
830# CONFIG_CRC_T10DIF is not set
831# CONFIG_CRC_ITU_T is not set
832CONFIG_CRC32=y
833# CONFIG_CRC7 is not set
834# CONFIG_LIBCRC32C is not set
835# CONFIG_CRC8 is not set
836# CONFIG_XZ_DEC is not set
837# CONFIG_XZ_DEC_BCJ is not set
838CONFIG_DQL=y
839CONFIG_NLATTR=y
840# CONFIG_AVERAGE is not set
841# CONFIG_CORDIC is not set
842
843#
844# Kernel hacking
845#
846# CONFIG_PRINTK_TIME is not set
847CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
848CONFIG_ENABLE_WARN_DEPRECATED=y
849CONFIG_ENABLE_MUST_CHECK=y
850CONFIG_FRAME_WARN=1024
851# CONFIG_STRIP_ASM_SYMS is not set
852# CONFIG_UNUSED_SYMBOLS is not set
853# CONFIG_DEBUG_FS is not set
854# CONFIG_DEBUG_SECTION_MISMATCH is not set
855CONFIG_DEBUG_KERNEL=y
856# CONFIG_DEBUG_SHIRQ is not set
857# CONFIG_LOCKUP_DETECTOR is not set
858# CONFIG_HARDLOCKUP_DETECTOR is not set
859# CONFIG_DETECT_HUNG_TASK is not set
860CONFIG_SCHED_DEBUG=y
861# CONFIG_SCHEDSTATS is not set
862# CONFIG_TIMER_STATS is not set
863# CONFIG_DEBUG_OBJECTS is not set
864# CONFIG_DEBUG_SLAB is not set
865# CONFIG_DEBUG_RT_MUTEXES is not set
866# CONFIG_RT_MUTEX_TESTER is not set
867# CONFIG_DEBUG_SPINLOCK is not set
868# CONFIG_DEBUG_MUTEXES is not set
869# CONFIG_SPARSE_RCU_POINTER is not set
870# CONFIG_DEBUG_ATOMIC_SLEEP is not set
871# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
872# CONFIG_DEBUG_STACK_USAGE is not set
873# CONFIG_DEBUG_KOBJECT is not set
874CONFIG_DEBUG_BUGVERBOSE=y
875CONFIG_DEBUG_INFO=y
876# CONFIG_DEBUG_INFO_REDUCED is not set
877# CONFIG_DEBUG_VM is not set
878# CONFIG_DEBUG_WRITECOUNT is not set
879CONFIG_DEBUG_MEMORY_INIT=y
880# CONFIG_DEBUG_LIST is not set
881# CONFIG_TEST_LIST_SORT is not set
882# CONFIG_DEBUG_SG is not set
883# CONFIG_DEBUG_NOTIFIERS is not set
884# CONFIG_DEBUG_CREDENTIALS is not set
885CONFIG_FRAME_POINTER=y
886# CONFIG_BOOT_PRINTK_DELAY is not set
887# CONFIG_RCU_TORTURE_TEST is not set
888# CONFIG_BACKTRACE_SELF_TEST is not set
889# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
890# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
891# CONFIG_FAULT_INJECTION is not set
892# CONFIG_SYSCTL_SYSCALL_CHECK is not set
893# CONFIG_DEBUG_PAGEALLOC is not set
894# CONFIG_ATOMIC64_SELFTEST is not set
895# CONFIG_SAMPLES is not set
896# CONFIG_TEST_KSTRTOX is not set
897# CONFIG_GPROF is not set
898# CONFIG_GCOV is not set
899CONFIG_EARLY_PRINTK=y
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 3df3bd544492..29880c9b324e 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -645,11 +645,9 @@ void mconsole_sysrq(struct mc_request *req)
645 645
646static void stack_proc(void *arg) 646static void stack_proc(void *arg)
647{ 647{
648 struct task_struct *from = current, *to = arg; 648 struct task_struct *task = arg;
649 649
650 to->thread.saved_task = from; 650 show_stack(task, NULL);
651 rcu_user_hooks_switch(from, to);
652 switch_to(from, to, from);
653} 651}
654 652
655/* 653/*
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index c03cd5a02364..d89b02bb6262 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -19,8 +19,8 @@ struct task_struct;
19struct mm_struct; 19struct mm_struct;
20 20
21struct thread_struct { 21struct thread_struct {
22 struct task_struct *saved_task;
23 struct pt_regs regs; 22 struct pt_regs regs;
23 struct pt_regs *segv_regs;
24 int singlestep_syscall; 24 int singlestep_syscall;
25 void *fault_addr; 25 void *fault_addr;
26 jmp_buf *fault_catcher; 26 jmp_buf *fault_catcher;
diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
index 2c8eeb2df8b4..1c5b2a83046a 100644
--- a/arch/um/include/asm/thread_info.h
+++ b/arch/um/include/asm/thread_info.h
@@ -60,8 +60,6 @@ static inline struct thread_info *current_thread_info(void)
60 60
61#endif 61#endif
62 62
63#define PREEMPT_ACTIVE 0x10000000
64
65#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ 63#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
66#define TIF_SIGPENDING 1 /* signal pending */ 64#define TIF_SIGPENDING 1 /* signal pending */
67#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ 65#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
diff --git a/arch/um/include/shared/as-layout.h b/arch/um/include/shared/as-layout.h
index 694c792bab4e..41c8c774ec10 100644
--- a/arch/um/include/shared/as-layout.h
+++ b/arch/um/include/shared/as-layout.h
@@ -44,7 +44,6 @@ struct cpu_task {
44 44
45extern struct cpu_task cpu_tasks[]; 45extern struct cpu_task cpu_tasks[];
46 46
47extern unsigned long low_physmem;
48extern unsigned long high_physmem; 47extern unsigned long high_physmem;
49extern unsigned long uml_physmem; 48extern unsigned long uml_physmem;
50extern unsigned long uml_reserved; 49extern unsigned long uml_reserved;
@@ -52,8 +51,6 @@ extern unsigned long end_vm;
52extern unsigned long start_vm; 51extern unsigned long start_vm;
53extern unsigned long long highmem; 52extern unsigned long long highmem;
54 53
55extern unsigned long _stext, _etext, _sdata, _edata, __bss_start, _end;
56extern unsigned long _unprotected_end;
57extern unsigned long brk_start; 54extern unsigned long brk_start;
58 55
59extern unsigned long host_task_size; 56extern unsigned long host_task_size;
diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
index 021104d98cb3..75298d3358e7 100644
--- a/arch/um/include/shared/os.h
+++ b/arch/um/include/shared/os.h
@@ -227,6 +227,7 @@ extern void block_signals(void);
227extern void unblock_signals(void); 227extern void unblock_signals(void);
228extern int get_signals(void); 228extern int get_signals(void);
229extern int set_signals(int enable); 229extern int set_signals(int enable);
230extern int os_is_signal_stack(void);
230 231
231/* util.c */ 232/* util.c */
232extern void stack_protections(unsigned long address); 233extern void stack_protections(unsigned long address);
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index bbcef522bcb1..eecc4142764c 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -82,19 +82,8 @@ void *__switch_to(struct task_struct *from, struct task_struct *to)
82 to->thread.prev_sched = from; 82 to->thread.prev_sched = from;
83 set_current(to); 83 set_current(to);
84 84
85 do { 85 switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
86 current->thread.saved_task = NULL; 86 arch_switch_to(current);
87
88 switch_threads(&from->thread.switch_buf,
89 &to->thread.switch_buf);
90
91 arch_switch_to(current);
92
93 if (current->thread.saved_task)
94 show_regs(&(current->thread.regs));
95 to = current->thread.saved_task;
96 from = current;
97 } while (current->thread.saved_task);
98 87
99 return current->thread.prev_sched; 88 return current->thread.prev_sched;
100} 89}
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index 0dc4d1c6f98a..799d7e413bf5 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -1,6 +1,10 @@
1/* 1/*
2 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) 2 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL 3 * Copyright (C) 2013 Richard Weinberger <richrd@nod.at>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
4 */ 8 */
5 9
6#include <linux/kallsyms.h> 10#include <linux/kallsyms.h>
@@ -8,59 +12,87 @@
8#include <linux/module.h> 12#include <linux/module.h>
9#include <linux/sched.h> 13#include <linux/sched.h>
10#include <asm/sysrq.h> 14#include <asm/sysrq.h>
15#include <os.h>
11 16
12/* Catch non-i386 SUBARCH's. */ 17struct stack_frame {
13#if !defined(CONFIG_UML_X86) || defined(CONFIG_64BIT) 18 struct stack_frame *next_frame;
14void show_trace(struct task_struct *task, unsigned long * stack) 19 unsigned long return_address;
20};
21
22static void do_stack_trace(unsigned long *sp, unsigned long bp)
15{ 23{
24 int reliable;
16 unsigned long addr; 25 unsigned long addr;
26 struct stack_frame *frame = (struct stack_frame *)bp;
17 27
18 if (!stack) { 28 printk(KERN_INFO "Call Trace:\n");
19 stack = (unsigned long*) &stack; 29 while (((long) sp & (THREAD_SIZE-1)) != 0) {
20 WARN_ON(1); 30 addr = *sp;
21 }
22
23 printk(KERN_INFO "Call Trace: \n");
24 while (((long) stack & (THREAD_SIZE-1)) != 0) {
25 addr = *stack;
26 if (__kernel_text_address(addr)) { 31 if (__kernel_text_address(addr)) {
27 printk(KERN_INFO "%08lx: [<%08lx>]", 32 reliable = 0;
28 (unsigned long) stack, addr); 33 if ((unsigned long) sp == bp + sizeof(long)) {
29 print_symbol(KERN_CONT " %s", addr); 34 frame = frame ? frame->next_frame : NULL;
35 bp = (unsigned long)frame;
36 reliable = 1;
37 }
38
39 printk(KERN_INFO " [<%08lx>]", addr);
40 printk(KERN_CONT " %s", reliable ? "" : "? ");
41 print_symbol(KERN_CONT "%s", addr);
30 printk(KERN_CONT "\n"); 42 printk(KERN_CONT "\n");
31 } 43 }
32 stack++; 44 sp++;
33 } 45 }
34 printk(KERN_INFO "\n"); 46 printk(KERN_INFO "\n");
35} 47}
36#endif
37 48
38/*Stolen from arch/i386/kernel/traps.c */ 49static unsigned long get_frame_pointer(struct task_struct *task,
39static const int kstack_depth_to_print = 24; 50 struct pt_regs *segv_regs)
51{
52 if (!task || task == current)
53 return segv_regs ? PT_REGS_BP(segv_regs) : current_bp();
54 else
55 return KSTK_EBP(task);
56}
40 57
41/* This recently started being used in arch-independent code too, as in 58static unsigned long *get_stack_pointer(struct task_struct *task,
42 * kernel/sched/core.c.*/ 59 struct pt_regs *segv_regs)
43void show_stack(struct task_struct *task, unsigned long *esp)
44{ 60{
45 unsigned long *stack; 61 if (!task || task == current)
62 return segv_regs ? (unsigned long *)PT_REGS_SP(segv_regs) : current_sp();
63 else
64 return (unsigned long *)KSTK_ESP(task);
65}
66
67void show_stack(struct task_struct *task, unsigned long *stack)
68{
69 unsigned long *sp = stack, bp = 0;
70 struct pt_regs *segv_regs = current->thread.segv_regs;
46 int i; 71 int i;
47 72
48 if (esp == NULL) { 73 if (!segv_regs && os_is_signal_stack()) {
49 if (task != current && task != NULL) { 74 printk(KERN_ERR "Received SIGSEGV in SIGSEGV handler,"
50 esp = (unsigned long *) KSTK_ESP(task); 75 " aborting stack trace!\n");
51 } else { 76 return;
52 esp = (unsigned long *) &esp;
53 }
54 } 77 }
55 78
56 stack = esp; 79#ifdef CONFIG_FRAME_POINTER
57 for (i = 0; i < kstack_depth_to_print; i++) { 80 bp = get_frame_pointer(task, segv_regs);
81#endif
82
83 if (!stack)
84 sp = get_stack_pointer(task, segv_regs);
85
86 printk(KERN_INFO "Stack:\n");
87 stack = sp;
88 for (i = 0; i < 3 * STACKSLOTS_PER_LINE; i++) {
58 if (kstack_end(stack)) 89 if (kstack_end(stack))
59 break; 90 break;
60 if (i && ((i % 8) == 0)) 91 if (i && ((i % STACKSLOTS_PER_LINE) == 0))
61 printk(KERN_INFO " "); 92 printk(KERN_CONT "\n");
62 printk(KERN_CONT "%08lx ", *stack++); 93 printk(KERN_CONT " %08lx", *stack++);
63 } 94 }
95 printk(KERN_CONT "\n");
64 96
65 show_trace(task, esp); 97 do_stack_trace(sp, bp);
66} 98}
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 5c3aef74237f..974b87474a99 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -206,9 +206,12 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
206 int is_write = FAULT_WRITE(fi); 206 int is_write = FAULT_WRITE(fi);
207 unsigned long address = FAULT_ADDRESS(fi); 207 unsigned long address = FAULT_ADDRESS(fi);
208 208
209 if (regs)
210 current->thread.segv_regs = container_of(regs, struct pt_regs, regs);
211
209 if (!is_user && (address >= start_vm) && (address < end_vm)) { 212 if (!is_user && (address >= start_vm) && (address < end_vm)) {
210 flush_tlb_kernel_vm(); 213 flush_tlb_kernel_vm();
211 return 0; 214 goto out;
212 } 215 }
213 else if (current->mm == NULL) { 216 else if (current->mm == NULL) {
214 show_regs(container_of(regs, struct pt_regs, regs)); 217 show_regs(container_of(regs, struct pt_regs, regs));
@@ -230,7 +233,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
230 233
231 catcher = current->thread.fault_catcher; 234 catcher = current->thread.fault_catcher;
232 if (!err) 235 if (!err)
233 return 0; 236 goto out;
234 else if (catcher != NULL) { 237 else if (catcher != NULL) {
235 current->thread.fault_addr = (void *) address; 238 current->thread.fault_addr = (void *) address;
236 UML_LONGJMP(catcher, 1); 239 UML_LONGJMP(catcher, 1);
@@ -238,7 +241,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
238 else if (current->thread.fault_addr != NULL) 241 else if (current->thread.fault_addr != NULL)
239 panic("fault_addr set but no fault catcher"); 242 panic("fault_addr set but no fault catcher");
240 else if (!is_user && arch_fixup(ip, regs)) 243 else if (!is_user && arch_fixup(ip, regs))
241 return 0; 244 goto out;
242 245
243 if (!is_user) { 246 if (!is_user) {
244 show_regs(container_of(regs, struct pt_regs, regs)); 247 show_regs(container_of(regs, struct pt_regs, regs));
@@ -262,6 +265,11 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
262 current->thread.arch.faultinfo = fi; 265 current->thread.arch.faultinfo = fi;
263 force_sig_info(SIGSEGV, &si, current); 266 force_sig_info(SIGSEGV, &si, current);
264 } 267 }
268
269out:
270 if (regs)
271 current->thread.segv_regs = NULL;
272
265 return 0; 273 return 0;
266} 274}
267 275
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 87df5e3acc26..016adf0985d5 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -13,6 +13,7 @@
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <asm/pgtable.h> 14#include <asm/pgtable.h>
15#include <asm/processor.h> 15#include <asm/processor.h>
16#include <asm/sections.h>
16#include <asm/setup.h> 17#include <asm/setup.h>
17#include <as-layout.h> 18#include <as-layout.h>
18#include <arch.h> 19#include <arch.h>
@@ -234,7 +235,6 @@ static int panic_exit(struct notifier_block *self, unsigned long unused1,
234 void *unused2) 235 void *unused2)
235{ 236{
236 bust_spinlocks(1); 237 bust_spinlocks(1);
237 show_regs(&(current->thread.regs));
238 bust_spinlocks(0); 238 bust_spinlocks(0);
239 uml_exitcode = 1; 239 uml_exitcode = 1;
240 os_dump_core(); 240 os_dump_core();
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index 905924b773d3..7b605e4dfffa 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -304,3 +304,11 @@ int set_signals(int enable)
304 304
305 return ret; 305 return ret;
306} 306}
307
308int os_is_signal_stack(void)
309{
310 stack_t ss;
311 sigaltstack(NULL, &ss);
312
313 return ss.ss_flags & SS_ONSTACK;
314}
diff --git a/arch/unicore32/include/asm/thread_info.h b/arch/unicore32/include/asm/thread_info.h
index 818b4a1edb5b..af36d8eabdf1 100644
--- a/arch/unicore32/include/asm/thread_info.h
+++ b/arch/unicore32/include/asm/thread_info.h
@@ -118,12 +118,6 @@ static inline struct thread_info *current_thread_info(void)
118#endif 118#endif
119 119
120/* 120/*
121 * We use bit 30 of the preempt_count to indicate that kernel
122 * preemption is occurring. See <asm/hardirq.h>.
123 */
124#define PREEMPT_ACTIVE 0x40000000
125
126/*
127 * thread information flags: 121 * thread information flags:
128 * TIF_SYSCALL_TRACE - syscall trace active 122 * TIF_SYSCALL_TRACE - syscall trace active
129 * TIF_SIGPENDING - signal pending 123 * TIF_SIGPENDING - signal pending
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 41250fb33985..57d021507120 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -31,6 +31,9 @@ ifeq ($(CONFIG_X86_32),y)
31 31
32 KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return 32 KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
33 33
34 # Don't autogenerate MMX or SSE instructions
35 KBUILD_CFLAGS += -mno-mmx -mno-sse
36
34 # Never want PIC in a 32-bit kernel, prevent breakage with GCC built 37 # Never want PIC in a 32-bit kernel, prevent breakage with GCC built
35 # with nonstandard options 38 # with nonstandard options
36 KBUILD_CFLAGS += -fno-pic 39 KBUILD_CFLAGS += -fno-pic
@@ -57,8 +60,11 @@ else
57 KBUILD_AFLAGS += -m64 60 KBUILD_AFLAGS += -m64
58 KBUILD_CFLAGS += -m64 61 KBUILD_CFLAGS += -m64
59 62
63 # Don't autogenerate MMX or SSE instructions
64 KBUILD_CFLAGS += -mno-mmx -mno-sse
65
60 # Use -mpreferred-stack-boundary=3 if supported. 66 # Use -mpreferred-stack-boundary=3 if supported.
61 KBUILD_CFLAGS += $(call cc-option,-mno-sse -mpreferred-stack-boundary=3) 67 KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
62 68
63 # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu) 69 # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
64 cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8) 70 cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index dce69a256896..d9c11956fce0 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -53,18 +53,18 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
53 53
54# How to compile the 16-bit code. Note we always compile for -march=i386, 54# How to compile the 16-bit code. Note we always compile for -march=i386,
55# that way we can complain to the user if the CPU is insufficient. 55# that way we can complain to the user if the CPU is insufficient.
56KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \ 56KBUILD_CFLAGS := $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
57 -DDISABLE_BRANCH_PROFILING \ 57 -DDISABLE_BRANCH_PROFILING \
58 -Wall -Wstrict-prototypes \ 58 -Wall -Wstrict-prototypes \
59 -march=i386 -mregparm=3 \ 59 -march=i386 -mregparm=3 \
60 -include $(srctree)/$(src)/code16gcc.h \ 60 -include $(srctree)/$(src)/code16gcc.h \
61 -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ 61 -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
62 -mno-mmx -mno-sse \
62 $(call cc-option, -ffreestanding) \ 63 $(call cc-option, -ffreestanding) \
63 $(call cc-option, -fno-toplevel-reorder,\ 64 $(call cc-option, -fno-toplevel-reorder,\
64 $(call cc-option, -fno-unit-at-a-time)) \ 65 $(call cc-option, -fno-unit-at-a-time)) \
65 $(call cc-option, -fno-stack-protector) \ 66 $(call cc-option, -fno-stack-protector) \
66 $(call cc-option, -mpreferred-stack-boundary=2) 67 $(call cc-option, -mpreferred-stack-boundary=2)
67KBUILD_CFLAGS += $(call cc-option, -m32)
68KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ 68KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
69GCOV_PROFILE := n 69GCOV_PROFILE := n
70 70
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index dcd90df10ab4..c8a6792e7842 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -13,6 +13,7 @@ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
13cflags-$(CONFIG_X86_32) := -march=i386 13cflags-$(CONFIG_X86_32) := -march=i386
14cflags-$(CONFIG_X86_64) := -mcmodel=small 14cflags-$(CONFIG_X86_64) := -mcmodel=small
15KBUILD_CFLAGS += $(cflags-y) 15KBUILD_CFLAGS += $(cflags-y)
16KBUILD_CFLAGS += -mno-mmx -mno-sse
16KBUILD_CFLAGS += $(call cc-option,-ffreestanding) 17KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
17KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) 18KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
18 19
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 7d6ba9db1be9..e0fc24db234a 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -3,8 +3,9 @@
3# 3#
4 4
5avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no) 5avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no)
6avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
7 $(comma)4)$(comma)%ymm2,yes,no)
6 8
7obj-$(CONFIG_CRYPTO_ABLK_HELPER_X86) += ablk_helper.o
8obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o 9obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o
9 10
10obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o 11obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index f80e668785c0..835488b745ee 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -34,7 +34,7 @@
34#include <asm/cpu_device_id.h> 34#include <asm/cpu_device_id.h>
35#include <asm/i387.h> 35#include <asm/i387.h>
36#include <asm/crypto/aes.h> 36#include <asm/crypto/aes.h>
37#include <asm/crypto/ablk_helper.h> 37#include <crypto/ablk_helper.h>
38#include <crypto/scatterwalk.h> 38#include <crypto/scatterwalk.h>
39#include <crypto/internal/aead.h> 39#include <crypto/internal/aead.h>
40#include <linux/workqueue.h> 40#include <linux/workqueue.h>
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
index 414fe5d7946b..4209a76fcdaa 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
@@ -14,6 +14,7 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <crypto/ablk_helper.h>
17#include <crypto/algapi.h> 18#include <crypto/algapi.h>
18#include <crypto/ctr.h> 19#include <crypto/ctr.h>
19#include <crypto/lrw.h> 20#include <crypto/lrw.h>
@@ -21,7 +22,6 @@
21#include <asm/xcr.h> 22#include <asm/xcr.h>
22#include <asm/xsave.h> 23#include <asm/xsave.h>
23#include <asm/crypto/camellia.h> 24#include <asm/crypto/camellia.h>
24#include <asm/crypto/ablk_helper.h>
25#include <asm/crypto/glue_helper.h> 25#include <asm/crypto/glue_helper.h>
26 26
27#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 27#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index 37fd0c0a81ea..87a041a10f4a 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -14,6 +14,7 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <crypto/ablk_helper.h>
17#include <crypto/algapi.h> 18#include <crypto/algapi.h>
18#include <crypto/ctr.h> 19#include <crypto/ctr.h>
19#include <crypto/lrw.h> 20#include <crypto/lrw.h>
@@ -21,7 +22,6 @@
21#include <asm/xcr.h> 22#include <asm/xcr.h>
22#include <asm/xsave.h> 23#include <asm/xsave.h>
23#include <asm/crypto/camellia.h> 24#include <asm/crypto/camellia.h>
24#include <asm/crypto/ablk_helper.h>
25#include <asm/crypto/glue_helper.h> 25#include <asm/crypto/glue_helper.h>
26 26
27#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 27#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index c6631813dc11..e6a3700489b9 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -26,13 +26,13 @@
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/crypto.h> 27#include <linux/crypto.h>
28#include <linux/err.h> 28#include <linux/err.h>
29#include <crypto/ablk_helper.h>
29#include <crypto/algapi.h> 30#include <crypto/algapi.h>
30#include <crypto/cast5.h> 31#include <crypto/cast5.h>
31#include <crypto/cryptd.h> 32#include <crypto/cryptd.h>
32#include <crypto/ctr.h> 33#include <crypto/ctr.h>
33#include <asm/xcr.h> 34#include <asm/xcr.h>
34#include <asm/xsave.h> 35#include <asm/xsave.h>
35#include <asm/crypto/ablk_helper.h>
36#include <asm/crypto/glue_helper.h> 36#include <asm/crypto/glue_helper.h>
37 37
38#define CAST5_PARALLEL_BLOCKS 16 38#define CAST5_PARALLEL_BLOCKS 16
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index 8d0dfb86a559..09f3677393e4 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -28,6 +28,7 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/crypto.h> 29#include <linux/crypto.h>
30#include <linux/err.h> 30#include <linux/err.h>
31#include <crypto/ablk_helper.h>
31#include <crypto/algapi.h> 32#include <crypto/algapi.h>
32#include <crypto/cast6.h> 33#include <crypto/cast6.h>
33#include <crypto/cryptd.h> 34#include <crypto/cryptd.h>
@@ -37,7 +38,6 @@
37#include <crypto/xts.h> 38#include <crypto/xts.h>
38#include <asm/xcr.h> 39#include <asm/xcr.h>
39#include <asm/xsave.h> 40#include <asm/xsave.h>
40#include <asm/crypto/ablk_helper.h>
41#include <asm/crypto/glue_helper.h> 41#include <asm/crypto/glue_helper.h>
42 42
43#define CAST6_PARALLEL_BLOCKS 8 43#define CAST6_PARALLEL_BLOCKS 8
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index 23aabc6c20a5..2fae489b1524 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -14,6 +14,7 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <crypto/ablk_helper.h>
17#include <crypto/algapi.h> 18#include <crypto/algapi.h>
18#include <crypto/ctr.h> 19#include <crypto/ctr.h>
19#include <crypto/lrw.h> 20#include <crypto/lrw.h>
@@ -22,7 +23,6 @@
22#include <asm/xcr.h> 23#include <asm/xcr.h>
23#include <asm/xsave.h> 24#include <asm/xsave.h>
24#include <asm/crypto/serpent-avx.h> 25#include <asm/crypto/serpent-avx.h>
25#include <asm/crypto/ablk_helper.h>
26#include <asm/crypto/glue_helper.h> 26#include <asm/crypto/glue_helper.h>
27 27
28#define SERPENT_AVX2_PARALLEL_BLOCKS 16 28#define SERPENT_AVX2_PARALLEL_BLOCKS 16
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index 9ae83cf8d21e..ff4870870972 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -28,6 +28,7 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/crypto.h> 29#include <linux/crypto.h>
30#include <linux/err.h> 30#include <linux/err.h>
31#include <crypto/ablk_helper.h>
31#include <crypto/algapi.h> 32#include <crypto/algapi.h>
32#include <crypto/serpent.h> 33#include <crypto/serpent.h>
33#include <crypto/cryptd.h> 34#include <crypto/cryptd.h>
@@ -38,7 +39,6 @@
38#include <asm/xcr.h> 39#include <asm/xcr.h>
39#include <asm/xsave.h> 40#include <asm/xsave.h>
40#include <asm/crypto/serpent-avx.h> 41#include <asm/crypto/serpent-avx.h>
41#include <asm/crypto/ablk_helper.h>
42#include <asm/crypto/glue_helper.h> 42#include <asm/crypto/glue_helper.h>
43 43
44/* 8-way parallel cipher functions */ 44/* 8-way parallel cipher functions */
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index 97a356ece24d..8c95f8637306 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -34,6 +34,7 @@
34#include <linux/types.h> 34#include <linux/types.h>
35#include <linux/crypto.h> 35#include <linux/crypto.h>
36#include <linux/err.h> 36#include <linux/err.h>
37#include <crypto/ablk_helper.h>
37#include <crypto/algapi.h> 38#include <crypto/algapi.h>
38#include <crypto/serpent.h> 39#include <crypto/serpent.h>
39#include <crypto/cryptd.h> 40#include <crypto/cryptd.h>
@@ -42,7 +43,6 @@
42#include <crypto/lrw.h> 43#include <crypto/lrw.h>
43#include <crypto/xts.h> 44#include <crypto/xts.h>
44#include <asm/crypto/serpent-sse2.h> 45#include <asm/crypto/serpent-sse2.h>
45#include <asm/crypto/ablk_helper.h>
46#include <asm/crypto/glue_helper.h> 46#include <asm/crypto/glue_helper.h>
47 47
48static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src) 48static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 50226c4b86ed..f248546da1ca 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -281,7 +281,7 @@ static int __init sha256_ssse3_mod_init(void)
281 /* allow AVX to override SSSE3, it's a little faster */ 281 /* allow AVX to override SSSE3, it's a little faster */
282 if (avx_usable()) { 282 if (avx_usable()) {
283#ifdef CONFIG_AS_AVX2 283#ifdef CONFIG_AS_AVX2
284 if (boot_cpu_has(X86_FEATURE_AVX2)) 284 if (boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_BMI2))
285 sha256_transform_asm = sha256_transform_rorx; 285 sha256_transform_asm = sha256_transform_rorx;
286 else 286 else
287#endif 287#endif
@@ -319,4 +319,4 @@ MODULE_LICENSE("GPL");
319MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated"); 319MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");
320 320
321MODULE_ALIAS("sha256"); 321MODULE_ALIAS("sha256");
322MODULE_ALIAS("sha384"); 322MODULE_ALIAS("sha224");
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
index a62ba541884e..4e3c665be129 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
@@ -28,6 +28,7 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/crypto.h> 29#include <linux/crypto.h>
30#include <linux/err.h> 30#include <linux/err.h>
31#include <crypto/ablk_helper.h>
31#include <crypto/algapi.h> 32#include <crypto/algapi.h>
32#include <crypto/twofish.h> 33#include <crypto/twofish.h>
33#include <crypto/cryptd.h> 34#include <crypto/cryptd.h>
@@ -39,7 +40,6 @@
39#include <asm/xcr.h> 40#include <asm/xcr.h>
40#include <asm/xsave.h> 41#include <asm/xsave.h>
41#include <asm/crypto/twofish.h> 42#include <asm/crypto/twofish.h>
42#include <asm/crypto/ablk_helper.h>
43#include <asm/crypto/glue_helper.h> 43#include <asm/crypto/glue_helper.h>
44#include <crypto/scatterwalk.h> 44#include <crypto/scatterwalk.h>
45#include <linux/workqueue.h> 45#include <linux/workqueue.h>
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index da31c8b8a92d..b17f4f48ecd7 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -77,7 +77,7 @@ static inline void atomic_sub(int i, atomic_t *v)
77 */ 77 */
78static inline int atomic_sub_and_test(int i, atomic_t *v) 78static inline int atomic_sub_and_test(int i, atomic_t *v)
79{ 79{
80 GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, i, "%0", "e"); 80 GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
81} 81}
82 82
83/** 83/**
@@ -141,7 +141,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
141 */ 141 */
142static inline int atomic_add_negative(int i, atomic_t *v) 142static inline int atomic_add_negative(int i, atomic_t *v)
143{ 143{
144 GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, i, "%0", "s"); 144 GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
145} 145}
146 146
147/** 147/**
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 3f065c985aee..46e9052bbd28 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -72,7 +72,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
72 */ 72 */
73static inline int atomic64_sub_and_test(long i, atomic64_t *v) 73static inline int atomic64_sub_and_test(long i, atomic64_t *v)
74{ 74{
75 GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, i, "%0", "e"); 75 GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
76} 76}
77 77
78/** 78/**
@@ -138,7 +138,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
138 */ 138 */
139static inline int atomic64_add_negative(long i, atomic64_t *v) 139static inline int atomic64_add_negative(long i, atomic64_t *v)
140{ 140{
141 GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, i, "%0", "s"); 141 GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
142} 142}
143 143
144/** 144/**
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 6d76d0935989..9fc1af74dc83 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -205,7 +205,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
205 */ 205 */
206static inline int test_and_set_bit(long nr, volatile unsigned long *addr) 206static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
207{ 207{
208 GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, nr, "%0", "c"); 208 GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
209} 209}
210 210
211/** 211/**
@@ -251,7 +251,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
251 */ 251 */
252static inline int test_and_clear_bit(long nr, volatile unsigned long *addr) 252static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
253{ 253{
254 GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, nr, "%0", "c"); 254 GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
255} 255}
256 256
257/** 257/**
@@ -304,7 +304,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
304 */ 304 */
305static inline int test_and_change_bit(long nr, volatile unsigned long *addr) 305static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
306{ 306{
307 GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, nr, "%0", "c"); 307 GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
308} 308}
309 309
310static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr) 310static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 5b23e605e707..4ad6560847b1 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -52,7 +52,7 @@ static inline void local_sub(long i, local_t *l)
52 */ 52 */
53static inline int local_sub_and_test(long i, local_t *l) 53static inline int local_sub_and_test(long i, local_t *l)
54{ 54{
55 GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, i, "%0", "e"); 55 GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
56} 56}
57 57
58/** 58/**
@@ -92,7 +92,7 @@ static inline int local_inc_and_test(local_t *l)
92 */ 92 */
93static inline int local_add_negative(long i, local_t *l) 93static inline int local_add_negative(long i, local_t *l)
94{ 94{
95 GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, i, "%0", "s"); 95 GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
96} 96}
97 97
98/** 98/**
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 7d7443283a9d..947b5c417e83 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -15,7 +15,7 @@ struct pci_sysdata {
15 int domain; /* PCI domain */ 15 int domain; /* PCI domain */
16 int node; /* NUMA node */ 16 int node; /* NUMA node */
17#ifdef CONFIG_ACPI 17#ifdef CONFIG_ACPI
18 void *acpi; /* ACPI-specific data */ 18 struct acpi_device *companion; /* ACPI companion device */
19#endif 19#endif
20#ifdef CONFIG_X86_64 20#ifdef CONFIG_X86_64
21 void *iommu; /* IOMMU private data */ 21 void *iommu; /* IOMMU private data */
diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
index 1ff990f1de8e..8f7866a5b9a4 100644
--- a/arch/x86/include/asm/rmwcc.h
+++ b/arch/x86/include/asm/rmwcc.h
@@ -16,8 +16,8 @@ cc_label: \
16#define GEN_UNARY_RMWcc(op, var, arg0, cc) \ 16#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
17 __GEN_RMWcc(op " " arg0, var, cc) 17 __GEN_RMWcc(op " " arg0, var, cc)
18 18
19#define GEN_BINARY_RMWcc(op, var, val, arg0, cc) \ 19#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
20 __GEN_RMWcc(op " %1, " arg0, var, cc, "er" (val)) 20 __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
21 21
22#else /* !CC_HAVE_ASM_GOTO */ 22#else /* !CC_HAVE_ASM_GOTO */
23 23
@@ -33,8 +33,8 @@ do { \
33#define GEN_UNARY_RMWcc(op, var, arg0, cc) \ 33#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
34 __GEN_RMWcc(op " " arg0, var, cc) 34 __GEN_RMWcc(op " " arg0, var, cc)
35 35
36#define GEN_BINARY_RMWcc(op, var, val, arg0, cc) \ 36#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
37 __GEN_RMWcc(op " %2, " arg0, var, cc, "er" (val)) 37 __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
38 38
39#endif /* CC_HAVE_ASM_GOTO */ 39#endif /* CC_HAVE_ASM_GOTO */
40 40
diff --git a/arch/x86/include/asm/simd.h b/arch/x86/include/asm/simd.h
new file mode 100644
index 000000000000..ee80b92f0096
--- /dev/null
+++ b/arch/x86/include/asm/simd.h
@@ -0,0 +1,11 @@
1
2#include <asm/i387.h>
3
4/*
5 * may_use_simd - whether it is allowable at this time to issue SIMD
6 * instructions or access the SIMD register file
7 */
8static __must_check inline bool may_use_simd(void)
9{
10 return irq_fpu_usable();
11}
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index c46a46be1ec6..3ba3de457d05 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -153,8 +153,6 @@ struct thread_info {
153#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) 153#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
154#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) 154#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
155 155
156#define PREEMPT_ACTIVE 0x10000000
157
158#ifdef CONFIG_X86_32 156#ifdef CONFIG_X86_32
159 157
160#define STACK_WARN (THREAD_SIZE/8) 158#define STACK_WARN (THREAD_SIZE/8)
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index b93e09a0fa21..37813b5ddc37 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -147,6 +147,8 @@
147#define MSR_PP1_ENERGY_STATUS 0x00000641 147#define MSR_PP1_ENERGY_STATUS 0x00000641
148#define MSR_PP1_POLICY 0x00000642 148#define MSR_PP1_POLICY 0x00000642
149 149
150#define MSR_CORE_C1_RES 0x00000660
151
150#define MSR_AMD64_MC0_MASK 0xc0010044 152#define MSR_AMD64_MC0_MASK 0xc0010044
151 153
152#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) 154#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x))
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index ed165d657380..d278736bf774 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -62,6 +62,7 @@ unsigned disabled_cpus;
62 62
63/* Processor that is doing the boot up */ 63/* Processor that is doing the boot up */
64unsigned int boot_cpu_physical_apicid = -1U; 64unsigned int boot_cpu_physical_apicid = -1U;
65EXPORT_SYMBOL_GPL(boot_cpu_physical_apicid);
65 66
66/* 67/*
67 * The highest APIC ID seen during enumeration. 68 * The highest APIC ID seen during enumeration.
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 96f958d8cd45..bc4a088f9023 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -330,8 +330,8 @@ static struct pci_device_id intel_stolen_ids[] __initdata = {
330 INTEL_I915GM_IDS(gen3_stolen_size), 330 INTEL_I915GM_IDS(gen3_stolen_size),
331 INTEL_I945G_IDS(gen3_stolen_size), 331 INTEL_I945G_IDS(gen3_stolen_size),
332 INTEL_I945GM_IDS(gen3_stolen_size), 332 INTEL_I945GM_IDS(gen3_stolen_size),
333 INTEL_VLV_M_IDS(gen3_stolen_size), 333 INTEL_VLV_M_IDS(gen6_stolen_size),
334 INTEL_VLV_D_IDS(gen3_stolen_size), 334 INTEL_VLV_D_IDS(gen6_stolen_size),
335 INTEL_PINEVIEW_IDS(gen3_stolen_size), 335 INTEL_PINEVIEW_IDS(gen3_stolen_size),
336 INTEL_I965G_IDS(gen3_stolen_size), 336 INTEL_I965G_IDS(gen3_stolen_size),
337 INTEL_G33_IDS(gen3_stolen_size), 337 INTEL_G33_IDS(gen3_stolen_size),
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index da3c599584a3..c752cb43e52f 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -558,6 +558,17 @@ void native_machine_shutdown(void)
558{ 558{
559 /* Stop the cpus and apics */ 559 /* Stop the cpus and apics */
560#ifdef CONFIG_X86_IO_APIC 560#ifdef CONFIG_X86_IO_APIC
561 /*
562 * Disabling IO APIC before local APIC is a workaround for
563 * erratum AVR31 in "Intel Atom Processor C2000 Product Family
564 * Specification Update". In this situation, interrupts that target
565 * a Logical Processor whose Local APIC is either in the process of
566 * being hardware disabled or software disabled are neither delivered
567 * nor discarded. When this erratum occurs, the processor may hang.
568 *
569 * Even without the erratum, it still makes sense to quiet IO APIC
570 * before disabling Local APIC.
571 */
561 disable_IO_APIC(); 572 disable_IO_APIC();
562#endif 573#endif
563 574
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 5439117d5c4c..dec48bfaddb8 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -143,6 +143,8 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
143 return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff; 143 return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
144} 144}
145 145
146#define KVM_X2APIC_CID_BITS 0
147
146static void recalculate_apic_map(struct kvm *kvm) 148static void recalculate_apic_map(struct kvm *kvm)
147{ 149{
148 struct kvm_apic_map *new, *old = NULL; 150 struct kvm_apic_map *new, *old = NULL;
@@ -180,7 +182,8 @@ static void recalculate_apic_map(struct kvm *kvm)
180 if (apic_x2apic_mode(apic)) { 182 if (apic_x2apic_mode(apic)) {
181 new->ldr_bits = 32; 183 new->ldr_bits = 32;
182 new->cid_shift = 16; 184 new->cid_shift = 16;
183 new->cid_mask = new->lid_mask = 0xffff; 185 new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1;
186 new->lid_mask = 0xffff;
184 } else if (kvm_apic_sw_enabled(apic) && 187 } else if (kvm_apic_sw_enabled(apic) &&
185 !new->cid_mask /* flat mode */ && 188 !new->cid_mask /* flat mode */ &&
186 kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) { 189 kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
@@ -841,7 +844,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
841 ASSERT(apic != NULL); 844 ASSERT(apic != NULL);
842 845
843 /* if initial count is 0, current count should also be 0 */ 846 /* if initial count is 0, current count should also be 0 */
844 if (kvm_apic_get_reg(apic, APIC_TMICT) == 0) 847 if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 ||
848 apic->lapic_timer.period == 0)
845 return 0; 849 return 0;
846 850
847 remaining = hrtimer_get_remaining(&apic->lapic_timer.timer); 851 remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
@@ -1691,7 +1695,6 @@ static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
1691void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) 1695void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
1692{ 1696{
1693 u32 data; 1697 u32 data;
1694 void *vapic;
1695 1698
1696 if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) 1699 if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
1697 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); 1700 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
@@ -1699,9 +1702,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
1699 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) 1702 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
1700 return; 1703 return;
1701 1704
1702 vapic = kmap_atomic(vcpu->arch.apic->vapic_page); 1705 kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
1703 data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)); 1706 sizeof(u32));
1704 kunmap_atomic(vapic);
1705 1707
1706 apic_set_tpr(vcpu->arch.apic, data & 0xff); 1708 apic_set_tpr(vcpu->arch.apic, data & 0xff);
1707} 1709}
@@ -1737,7 +1739,6 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
1737 u32 data, tpr; 1739 u32 data, tpr;
1738 int max_irr, max_isr; 1740 int max_irr, max_isr;
1739 struct kvm_lapic *apic = vcpu->arch.apic; 1741 struct kvm_lapic *apic = vcpu->arch.apic;
1740 void *vapic;
1741 1742
1742 apic_sync_pv_eoi_to_guest(vcpu, apic); 1743 apic_sync_pv_eoi_to_guest(vcpu, apic);
1743 1744
@@ -1753,18 +1754,24 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
1753 max_isr = 0; 1754 max_isr = 0;
1754 data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24); 1755 data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
1755 1756
1756 vapic = kmap_atomic(vcpu->arch.apic->vapic_page); 1757 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
1757 *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data; 1758 sizeof(u32));
1758 kunmap_atomic(vapic);
1759} 1759}
1760 1760
1761void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) 1761int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
1762{ 1762{
1763 vcpu->arch.apic->vapic_addr = vapic_addr; 1763 if (vapic_addr) {
1764 if (vapic_addr) 1764 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
1765 &vcpu->arch.apic->vapic_cache,
1766 vapic_addr, sizeof(u32)))
1767 return -EINVAL;
1765 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); 1768 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
1766 else 1769 } else {
1767 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); 1770 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
1771 }
1772
1773 vcpu->arch.apic->vapic_addr = vapic_addr;
1774 return 0;
1768} 1775}
1769 1776
1770int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data) 1777int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index c730ac9fe801..c8b0d0d2da5c 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -34,7 +34,7 @@ struct kvm_lapic {
34 */ 34 */
35 void *regs; 35 void *regs;
36 gpa_t vapic_addr; 36 gpa_t vapic_addr;
37 struct page *vapic_page; 37 struct gfn_to_hva_cache vapic_cache;
38 unsigned long pending_events; 38 unsigned long pending_events;
39 unsigned int sipi_vector; 39 unsigned int sipi_vector;
40}; 40};
@@ -76,7 +76,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
76void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset); 76void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset);
77void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector); 77void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector);
78 78
79void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr); 79int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
80void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu); 80void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
81void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu); 81void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
82 82
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index daff69e21150..1185fe7a7f47 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -296,4 +296,4 @@ static struct kernel_param_ops audit_param_ops = {
296 .get = param_get_bool, 296 .get = param_get_bool,
297}; 297};
298 298
299module_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644); 299arch_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 21ef1ba184ae..5d004da1e35d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
3214 r = -EFAULT; 3214 r = -EFAULT;
3215 if (copy_from_user(&va, argp, sizeof va)) 3215 if (copy_from_user(&va, argp, sizeof va))
3216 goto out; 3216 goto out;
3217 r = 0; 3217 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
3218 kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
3219 break; 3218 break;
3220 } 3219 }
3221 case KVM_X86_SETUP_MCE: { 3220 case KVM_X86_SETUP_MCE: {
@@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
5739 !kvm_event_needs_reinjection(vcpu); 5738 !kvm_event_needs_reinjection(vcpu);
5740} 5739}
5741 5740
5742static int vapic_enter(struct kvm_vcpu *vcpu)
5743{
5744 struct kvm_lapic *apic = vcpu->arch.apic;
5745 struct page *page;
5746
5747 if (!apic || !apic->vapic_addr)
5748 return 0;
5749
5750 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
5751 if (is_error_page(page))
5752 return -EFAULT;
5753
5754 vcpu->arch.apic->vapic_page = page;
5755 return 0;
5756}
5757
5758static void vapic_exit(struct kvm_vcpu *vcpu)
5759{
5760 struct kvm_lapic *apic = vcpu->arch.apic;
5761 int idx;
5762
5763 if (!apic || !apic->vapic_addr)
5764 return;
5765
5766 idx = srcu_read_lock(&vcpu->kvm->srcu);
5767 kvm_release_page_dirty(apic->vapic_page);
5768 mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
5769 srcu_read_unlock(&vcpu->kvm->srcu, idx);
5770}
5771
5772static void update_cr8_intercept(struct kvm_vcpu *vcpu) 5741static void update_cr8_intercept(struct kvm_vcpu *vcpu)
5773{ 5742{
5774 int max_irr, tpr; 5743 int max_irr, tpr;
@@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
6069 struct kvm *kvm = vcpu->kvm; 6038 struct kvm *kvm = vcpu->kvm;
6070 6039
6071 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); 6040 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
6072 r = vapic_enter(vcpu);
6073 if (r) {
6074 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
6075 return r;
6076 }
6077 6041
6078 r = 1; 6042 r = 1;
6079 while (r > 0) { 6043 while (r > 0) {
@@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
6132 6096
6133 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); 6097 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
6134 6098
6135 vapic_exit(vcpu);
6136
6137 return r; 6099 return r;
6138} 6100}
6139 6101
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index a7cccb6d7fec..c96314abd144 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -61,6 +61,7 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
61#if PAGETABLE_LEVELS > 2 61#if PAGETABLE_LEVELS > 2
62void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) 62void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
63{ 63{
64 struct page *page = virt_to_page(pmd);
64 paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); 65 paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
65 /* 66 /*
66 * NOTE! For PAE, any changes to the top page-directory-pointer-table 67 * NOTE! For PAE, any changes to the top page-directory-pointer-table
@@ -69,7 +70,8 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
69#ifdef CONFIG_X86_PAE 70#ifdef CONFIG_X86_PAE
70 tlb->need_flush_all = 1; 71 tlb->need_flush_all = 1;
71#endif 72#endif
72 tlb_remove_page(tlb, virt_to_page(pmd)); 73 pgtable_pmd_page_dtor(page);
74 tlb_remove_page(tlb, page);
73} 75}
74 76
75#if PAGETABLE_LEVELS > 3 77#if PAGETABLE_LEVELS > 3
@@ -209,7 +211,7 @@ static int preallocate_pmds(pmd_t *pmds[])
209 if (!pmd) 211 if (!pmd)
210 failed = true; 212 failed = true;
211 if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) { 213 if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
212 free_page((unsigned long)pmds[i]); 214 free_page((unsigned long)pmd);
213 pmd = NULL; 215 pmd = NULL;
214 failed = true; 216 failed = true;
215 } 217 }
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 7fb24e53d4c8..4f25ec077552 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -518,7 +518,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
518 sd = &info->sd; 518 sd = &info->sd;
519 sd->domain = domain; 519 sd->domain = domain;
520 sd->node = node; 520 sd->node = node;
521 sd->acpi = device->handle; 521 sd->companion = device;
522 /* 522 /*
523 * Maybe the desired pci bus has been already scanned. In such case 523 * Maybe the desired pci bus has been already scanned. In such case
524 * it is unnecessary to scan the pci bus with the given domain,busnum. 524 * it is unnecessary to scan the pci bus with the given domain,busnum.
@@ -589,7 +589,7 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
589{ 589{
590 struct pci_sysdata *sd = bridge->bus->sysdata; 590 struct pci_sysdata *sd = bridge->bus->sysdata;
591 591
592 ACPI_HANDLE_SET(&bridge->dev, sd->acpi); 592 ACPI_COMPANION_SET(&bridge->dev, sd->companion);
593 return 0; 593 return 0;
594} 594}
595 595
diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c
index 6599a0027b76..81b506d5befd 100644
--- a/arch/x86/platform/efi/early_printk.c
+++ b/arch/x86/platform/efi/early_printk.c
@@ -142,7 +142,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
142 efi_y += font->height; 142 efi_y += font->height;
143 } 143 }
144 144
145 if (efi_y + font->height >= si->lfb_height) { 145 if (efi_y + font->height > si->lfb_height) {
146 u32 i; 146 u32 i;
147 147
148 efi_y -= font->height; 148 efi_y -= font->height;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 92c02344a060..cceb813044ef 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -690,13 +690,6 @@ void __init efi_init(void)
690 690
691 set_bit(EFI_MEMMAP, &x86_efi_facility); 691 set_bit(EFI_MEMMAP, &x86_efi_facility);
692 692
693#ifdef CONFIG_X86_32
694 if (efi_is_native()) {
695 x86_platform.get_wallclock = efi_get_time;
696 x86_platform.set_wallclock = efi_set_rtc_mmss;
697 }
698#endif
699
700#if EFI_DEBUG 693#if EFI_DEBUG
701 print_efi_memmap(); 694 print_efi_memmap();
702#endif 695#endif
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 0f92173a12b6..efe4d7220397 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1070,12 +1070,13 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
1070 unsigned long status; 1070 unsigned long status;
1071 1071
1072 bcp = &per_cpu(bau_control, cpu); 1072 bcp = &per_cpu(bau_control, cpu);
1073 stat = bcp->statp;
1074 stat->s_enters++;
1075 1073
1076 if (bcp->nobau) 1074 if (bcp->nobau)
1077 return cpumask; 1075 return cpumask;
1078 1076
1077 stat = bcp->statp;
1078 stat->s_enters++;
1079
1079 if (bcp->busy) { 1080 if (bcp->busy) {
1080 descriptor_status = 1081 descriptor_status =
1081 read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0); 1082 read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index 88692871823f..9cac82588cbc 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -73,9 +73,10 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
73 -march=i386 -mregparm=3 \ 73 -march=i386 -mregparm=3 \
74 -include $(srctree)/$(src)/../../boot/code16gcc.h \ 74 -include $(srctree)/$(src)/../../boot/code16gcc.h \
75 -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ 75 -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
76 -mno-mmx -mno-sse \
76 $(call cc-option, -ffreestanding) \ 77 $(call cc-option, -ffreestanding) \
77 $(call cc-option, -fno-toplevel-reorder,\ 78 $(call cc-option, -fno-toplevel-reorder,\
78 $(call cc-option, -fno-unit-at-a-time)) \ 79 $(call cc-option, -fno-unit-at-a-time)) \
79 $(call cc-option, -fno-stack-protector) \ 80 $(call cc-option, -fno-stack-protector) \
80 $(call cc-option, -mpreferred-stack-boundary=2) 81 $(call cc-option, -mpreferred-stack-boundary=2)
81KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ 82KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig
index 14ef8d1dbc33..ed56a1c4ae73 100644
--- a/arch/x86/um/Kconfig
+++ b/arch/x86/um/Kconfig
@@ -31,6 +31,11 @@ config X86_64
31 def_bool 64BIT 31 def_bool 64BIT
32 select MODULES_USE_ELF_RELA 32 select MODULES_USE_ELF_RELA
33 33
34config ARCH_DEFCONFIG
35 string
36 default "arch/um/configs/i386_defconfig" if X86_32
37 default "arch/um/configs/x86_64_defconfig" if X86_64
38
34config RWSEM_XCHGADD_ALGORITHM 39config RWSEM_XCHGADD_ALGORITHM
35 def_bool 64BIT 40 def_bool 64BIT
36 41
diff --git a/arch/x86/um/asm/processor_32.h b/arch/x86/um/asm/processor_32.h
index 6c6689e574ce..c112de81c9e1 100644
--- a/arch/x86/um/asm/processor_32.h
+++ b/arch/x86/um/asm/processor_32.h
@@ -33,6 +33,8 @@ struct arch_thread {
33 .faultinfo = { 0, 0, 0 } \ 33 .faultinfo = { 0, 0, 0 } \
34} 34}
35 35
36#define STACKSLOTS_PER_LINE 8
37
36static inline void arch_flush_thread(struct arch_thread *thread) 38static inline void arch_flush_thread(struct arch_thread *thread)
37{ 39{
38 /* Clear any TLS still hanging */ 40 /* Clear any TLS still hanging */
@@ -53,4 +55,7 @@ static inline void arch_copy_thread(struct arch_thread *from,
53#define current_text_addr() \ 55#define current_text_addr() \
54 ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; }) 56 ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
55 57
58#define current_sp() ({ void *sp; __asm__("movl %%esp, %0" : "=r" (sp) : ); sp; })
59#define current_bp() ({ unsigned long bp; __asm__("movl %%ebp, %0" : "=r" (bp) : ); bp; })
60
56#endif 61#endif
diff --git a/arch/x86/um/asm/processor_64.h b/arch/x86/um/asm/processor_64.h
index 4b02a8455bd1..c3be85205a65 100644
--- a/arch/x86/um/asm/processor_64.h
+++ b/arch/x86/um/asm/processor_64.h
@@ -19,6 +19,8 @@ struct arch_thread {
19 .fs = 0, \ 19 .fs = 0, \
20 .faultinfo = { 0, 0, 0 } } 20 .faultinfo = { 0, 0, 0 } }
21 21
22#define STACKSLOTS_PER_LINE 4
23
22static inline void arch_flush_thread(struct arch_thread *thread) 24static inline void arch_flush_thread(struct arch_thread *thread)
23{ 25{
24} 26}
@@ -32,4 +34,7 @@ static inline void arch_copy_thread(struct arch_thread *from,
32#define current_text_addr() \ 34#define current_text_addr() \
33 ({ void *pc; __asm__("movq $1f,%0\n1:":"=g" (pc)); pc; }) 35 ({ void *pc; __asm__("movq $1f,%0\n1:":"=g" (pc)); pc; })
34 36
37#define current_sp() ({ void *sp; __asm__("movq %%rsp, %0" : "=r" (sp) : ); sp; })
38#define current_bp() ({ unsigned long bp; __asm__("movq %%rbp, %0" : "=r" (bp) : ); bp; })
39
35#endif 40#endif
diff --git a/arch/x86/um/sysrq_32.c b/arch/x86/um/sysrq_32.c
index c9bee5b8c0d3..16ee0e450e3e 100644
--- a/arch/x86/um/sysrq_32.c
+++ b/arch/x86/um/sysrq_32.c
@@ -30,70 +30,4 @@ void show_regs(struct pt_regs *regs)
30 printk(" DS: %04lx ES: %04lx\n", 30 printk(" DS: %04lx ES: %04lx\n",
31 0xffff & PT_REGS_DS(regs), 31 0xffff & PT_REGS_DS(regs),
32 0xffff & PT_REGS_ES(regs)); 32 0xffff & PT_REGS_ES(regs));
33
34 show_trace(NULL, (unsigned long *) &regs);
35} 33}
36
37/* Copied from i386. */
38static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
39{
40 return p > (void *)tinfo &&
41 p < (void *)tinfo + THREAD_SIZE - 3;
42}
43
44/* Adapted from i386 (we also print the address we read from). */
45static inline unsigned long print_context_stack(struct thread_info *tinfo,
46 unsigned long *stack, unsigned long ebp)
47{
48 unsigned long addr;
49
50#ifdef CONFIG_FRAME_POINTER
51 while (valid_stack_ptr(tinfo, (void *)ebp)) {
52 addr = *(unsigned long *)(ebp + 4);
53 printk("%08lx: [<%08lx>]", ebp + 4, addr);
54 print_symbol(" %s", addr);
55 printk("\n");
56 ebp = *(unsigned long *)ebp;
57 }
58#else
59 while (valid_stack_ptr(tinfo, stack)) {
60 addr = *stack;
61 if (__kernel_text_address(addr)) {
62 printk("%08lx: [<%08lx>]", (unsigned long) stack, addr);
63 print_symbol(" %s", addr);
64 printk("\n");
65 }
66 stack++;
67 }
68#endif
69 return ebp;
70}
71
72void show_trace(struct task_struct* task, unsigned long * stack)
73{
74 unsigned long ebp;
75 struct thread_info *context;
76
77 /* Turn this into BUG_ON if possible. */
78 if (!stack) {
79 stack = (unsigned long*) &stack;
80 printk("show_trace: got NULL stack, implicit assumption task == current");
81 WARN_ON(1);
82 }
83
84 if (!task)
85 task = current;
86
87 if (task != current) {
88 ebp = (unsigned long) KSTK_EBP(task);
89 } else {
90 asm ("movl %%ebp, %0" : "=r" (ebp) : );
91 }
92
93 context = (struct thread_info *)
94 ((unsigned long)stack & (~(THREAD_SIZE - 1)));
95 print_context_stack(context, stack, ebp);
96
97 printk("\n");
98}
99
diff --git a/arch/x86/um/sysrq_64.c b/arch/x86/um/sysrq_64.c
index a0e7fb1134a0..38b4e4abd0f8 100644
--- a/arch/x86/um/sysrq_64.c
+++ b/arch/x86/um/sysrq_64.c
@@ -12,7 +12,7 @@
12#include <asm/ptrace.h> 12#include <asm/ptrace.h>
13#include <asm/sysrq.h> 13#include <asm/sysrq.h>
14 14
15void __show_regs(struct pt_regs *regs) 15void show_regs(struct pt_regs *regs)
16{ 16{
17 printk("\n"); 17 printk("\n");
18 print_modules(); 18 print_modules();
@@ -33,9 +33,3 @@ void __show_regs(struct pt_regs *regs)
33 printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n", 33 printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
34 PT_REGS_R13(regs), PT_REGS_R14(regs), PT_REGS_R15(regs)); 34 PT_REGS_R13(regs), PT_REGS_R14(regs), PT_REGS_R15(regs));
35} 35}
36
37void show_regs(struct pt_regs *regs)
38{
39 __show_regs(regs);
40 show_trace(current, (unsigned long *) &regs);
41}
diff --git a/arch/x86/um/vdso/.gitignore b/arch/x86/um/vdso/.gitignore
new file mode 100644
index 000000000000..9cac6d072199
--- /dev/null
+++ b/arch/x86/um/vdso/.gitignore
@@ -0,0 +1,2 @@
1vdso-syms.lds
2vdso.lds
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index 9481004ac119..470153e8547c 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -76,8 +76,6 @@ struct thread_info {
76 76
77#endif 77#endif
78 78
79#define PREEMPT_ACTIVE 0x10000000
80
81/* 79/*
82 * macros/functions for gaining access to the thread information structure 80 * macros/functions for gaining access to the thread information structure
83 */ 81 */
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 1610b22edf09..86154eab9523 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -435,9 +435,9 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
435 uint64_t v; 435 uint64_t v;
436 436
437 do { 437 do {
438 start = u64_stats_fetch_begin(&stat->syncp); 438 start = u64_stats_fetch_begin_bh(&stat->syncp);
439 v = stat->cnt; 439 v = stat->cnt;
440 } while (u64_stats_fetch_retry(&stat->syncp, start)); 440 } while (u64_stats_fetch_retry_bh(&stat->syncp, start));
441 441
442 return v; 442 return v;
443} 443}
@@ -508,9 +508,9 @@ static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
508 struct blkg_rwstat tmp; 508 struct blkg_rwstat tmp;
509 509
510 do { 510 do {
511 start = u64_stats_fetch_begin(&rwstat->syncp); 511 start = u64_stats_fetch_begin_bh(&rwstat->syncp);
512 tmp = *rwstat; 512 tmp = *rwstat;
513 } while (u64_stats_fetch_retry(&rwstat->syncp, start)); 513 } while (u64_stats_fetch_retry_bh(&rwstat->syncp, start));
514 514
515 return tmp; 515 return tmp;
516} 516}
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 331e627301ea..fb6f3c0ffa49 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -502,15 +502,6 @@ void blk_abort_flushes(struct request_queue *q)
502 } 502 }
503} 503}
504 504
505static void bio_end_flush(struct bio *bio, int err)
506{
507 if (err)
508 clear_bit(BIO_UPTODATE, &bio->bi_flags);
509 if (bio->bi_private)
510 complete(bio->bi_private);
511 bio_put(bio);
512}
513
514/** 505/**
515 * blkdev_issue_flush - queue a flush 506 * blkdev_issue_flush - queue a flush
516 * @bdev: blockdev to issue flush for 507 * @bdev: blockdev to issue flush for
@@ -526,7 +517,6 @@ static void bio_end_flush(struct bio *bio, int err)
526int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, 517int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
527 sector_t *error_sector) 518 sector_t *error_sector)
528{ 519{
529 DECLARE_COMPLETION_ONSTACK(wait);
530 struct request_queue *q; 520 struct request_queue *q;
531 struct bio *bio; 521 struct bio *bio;
532 int ret = 0; 522 int ret = 0;
@@ -548,13 +538,9 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
548 return -ENXIO; 538 return -ENXIO;
549 539
550 bio = bio_alloc(gfp_mask, 0); 540 bio = bio_alloc(gfp_mask, 0);
551 bio->bi_end_io = bio_end_flush;
552 bio->bi_bdev = bdev; 541 bio->bi_bdev = bdev;
553 bio->bi_private = &wait;
554 542
555 bio_get(bio); 543 ret = submit_bio_wait(WRITE_FLUSH, bio);
556 submit_bio(WRITE_FLUSH, bio);
557 wait_for_completion_io(&wait);
558 544
559 /* 545 /*
560 * The driver must store the error location in ->bi_sector, if 546 * The driver must store the error location in ->bi_sector, if
@@ -564,9 +550,6 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
564 if (error_sector) 550 if (error_sector)
565 *error_sector = bio->bi_sector; 551 *error_sector = bio->bi_sector;
566 552
567 if (!bio_flagged(bio, BIO_UPTODATE))
568 ret = -EIO;
569
570 bio_put(bio); 553 bio_put(bio);
571 return ret; 554 return ret;
572} 555}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 862f458d4760..c79126e11030 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -171,9 +171,12 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
171} 171}
172EXPORT_SYMBOL(blk_mq_can_queue); 172EXPORT_SYMBOL(blk_mq_can_queue);
173 173
174static void blk_mq_rq_ctx_init(struct blk_mq_ctx *ctx, struct request *rq, 174static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
175 unsigned int rw_flags) 175 struct request *rq, unsigned int rw_flags)
176{ 176{
177 if (blk_queue_io_stat(q))
178 rw_flags |= REQ_IO_STAT;
179
177 rq->mq_ctx = ctx; 180 rq->mq_ctx = ctx;
178 rq->cmd_flags = rw_flags; 181 rq->cmd_flags = rw_flags;
179 ctx->rq_dispatched[rw_is_sync(rw_flags)]++; 182 ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
@@ -197,12 +200,14 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
197 200
198 rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved); 201 rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved);
199 if (rq) { 202 if (rq) {
200 blk_mq_rq_ctx_init(ctx, rq, rw); 203 blk_mq_rq_ctx_init(q, ctx, rq, rw);
201 break;
202 } else if (!(gfp & __GFP_WAIT))
203 break; 204 break;
205 }
204 206
205 blk_mq_put_ctx(ctx); 207 blk_mq_put_ctx(ctx);
208 if (!(gfp & __GFP_WAIT))
209 break;
210
206 __blk_mq_run_hw_queue(hctx); 211 __blk_mq_run_hw_queue(hctx);
207 blk_mq_wait_for_tags(hctx->tags); 212 blk_mq_wait_for_tags(hctx->tags);
208 } while (1); 213 } while (1);
@@ -219,7 +224,8 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
219 return NULL; 224 return NULL;
220 225
221 rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved); 226 rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
222 blk_mq_put_ctx(rq->mq_ctx); 227 if (rq)
228 blk_mq_put_ctx(rq->mq_ctx);
223 return rq; 229 return rq;
224} 230}
225 231
@@ -232,7 +238,8 @@ struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw,
232 return NULL; 238 return NULL;
233 239
234 rq = blk_mq_alloc_request_pinned(q, rw, gfp, true); 240 rq = blk_mq_alloc_request_pinned(q, rw, gfp, true);
235 blk_mq_put_ctx(rq->mq_ctx); 241 if (rq)
242 blk_mq_put_ctx(rq->mq_ctx);
236 return rq; 243 return rq;
237} 244}
238EXPORT_SYMBOL(blk_mq_alloc_reserved_request); 245EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
@@ -305,12 +312,12 @@ void blk_mq_complete_request(struct request *rq, int error)
305 312
306 blk_account_io_completion(rq, bytes); 313 blk_account_io_completion(rq, bytes);
307 314
315 blk_account_io_done(rq);
316
308 if (rq->end_io) 317 if (rq->end_io)
309 rq->end_io(rq, error); 318 rq->end_io(rq, error);
310 else 319 else
311 blk_mq_free_request(rq); 320 blk_mq_free_request(rq);
312
313 blk_account_io_done(rq);
314} 321}
315 322
316void __blk_mq_end_io(struct request *rq, int error) 323void __blk_mq_end_io(struct request *rq, int error)
@@ -718,6 +725,8 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
718{ 725{
719 struct blk_mq_ctx *ctx = rq->mq_ctx; 726 struct blk_mq_ctx *ctx = rq->mq_ctx;
720 727
728 trace_block_rq_insert(hctx->queue, rq);
729
721 list_add_tail(&rq->queuelist, &ctx->rq_list); 730 list_add_tail(&rq->queuelist, &ctx->rq_list);
722 blk_mq_hctx_mark_pending(hctx, ctx); 731 blk_mq_hctx_mark_pending(hctx, ctx);
723 732
@@ -921,7 +930,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
921 trace_block_getrq(q, bio, rw); 930 trace_block_getrq(q, bio, rw);
922 rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false); 931 rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
923 if (likely(rq)) 932 if (likely(rq))
924 blk_mq_rq_ctx_init(ctx, rq, rw); 933 blk_mq_rq_ctx_init(q, ctx, rq, rw);
925 else { 934 else {
926 blk_mq_put_ctx(ctx); 935 blk_mq_put_ctx(ctx);
927 trace_block_sleeprq(q, bio, rw); 936 trace_block_sleeprq(q, bio, rw);
@@ -1377,6 +1386,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
1377 q->queue_hw_ctx = hctxs; 1386 q->queue_hw_ctx = hctxs;
1378 1387
1379 q->mq_ops = reg->ops; 1388 q->mq_ops = reg->ops;
1389 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1380 1390
1381 blk_queue_make_request(q, blk_mq_make_request); 1391 blk_queue_make_request(q, blk_mq_make_request);
1382 blk_queue_rq_timed_out(q, reg->ops->timeout); 1392 blk_queue_rq_timed_out(q, reg->ops->timeout);
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index a8287b49d062..dc51f467a560 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -96,6 +96,7 @@
96 * - Code works, detects all the partitions. 96 * - Code works, detects all the partitions.
97 * 97 *
98 ************************************************************/ 98 ************************************************************/
99#include <linux/kernel.h>
99#include <linux/crc32.h> 100#include <linux/crc32.h>
100#include <linux/ctype.h> 101#include <linux/ctype.h>
101#include <linux/math64.h> 102#include <linux/math64.h>
@@ -715,8 +716,8 @@ int efi_partition(struct parsed_partitions *state)
715 efi_guid_unparse(&ptes[i].unique_partition_guid, info->uuid); 716 efi_guid_unparse(&ptes[i].unique_partition_guid, info->uuid);
716 717
717 /* Naively convert UTF16-LE to 7 bits. */ 718 /* Naively convert UTF16-LE to 7 bits. */
718 label_max = min(sizeof(info->volname) - 1, 719 label_max = min(ARRAY_SIZE(info->volname) - 1,
719 sizeof(ptes[i].partition_name)); 720 ARRAY_SIZE(ptes[i].partition_name));
720 info->volname[label_max] = 0; 721 info->volname[label_max] = 0;
721 while (label_count < label_max) { 722 while (label_count < label_max) {
722 u8 c = ptes[i].partition_name[label_count] & 0xff; 723 u8 c = ptes[i].partition_name[label_count] & 0xff;
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 71f337aefa39..7bcb70d216e1 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -174,9 +174,8 @@ config CRYPTO_TEST
174 help 174 help
175 Quick & dirty crypto test module. 175 Quick & dirty crypto test module.
176 176
177config CRYPTO_ABLK_HELPER_X86 177config CRYPTO_ABLK_HELPER
178 tristate 178 tristate
179 depends on X86
180 select CRYPTO_CRYPTD 179 select CRYPTO_CRYPTD
181 180
182config CRYPTO_GLUE_HELPER_X86 181config CRYPTO_GLUE_HELPER_X86
@@ -695,7 +694,7 @@ config CRYPTO_AES_NI_INTEL
695 select CRYPTO_AES_X86_64 if 64BIT 694 select CRYPTO_AES_X86_64 if 64BIT
696 select CRYPTO_AES_586 if !64BIT 695 select CRYPTO_AES_586 if !64BIT
697 select CRYPTO_CRYPTD 696 select CRYPTO_CRYPTD
698 select CRYPTO_ABLK_HELPER_X86 697 select CRYPTO_ABLK_HELPER
699 select CRYPTO_ALGAPI 698 select CRYPTO_ALGAPI
700 select CRYPTO_GLUE_HELPER_X86 if 64BIT 699 select CRYPTO_GLUE_HELPER_X86 if 64BIT
701 select CRYPTO_LRW 700 select CRYPTO_LRW
@@ -895,7 +894,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
895 depends on CRYPTO 894 depends on CRYPTO
896 select CRYPTO_ALGAPI 895 select CRYPTO_ALGAPI
897 select CRYPTO_CRYPTD 896 select CRYPTO_CRYPTD
898 select CRYPTO_ABLK_HELPER_X86 897 select CRYPTO_ABLK_HELPER
899 select CRYPTO_GLUE_HELPER_X86 898 select CRYPTO_GLUE_HELPER_X86
900 select CRYPTO_CAMELLIA_X86_64 899 select CRYPTO_CAMELLIA_X86_64
901 select CRYPTO_LRW 900 select CRYPTO_LRW
@@ -917,7 +916,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
917 depends on CRYPTO 916 depends on CRYPTO
918 select CRYPTO_ALGAPI 917 select CRYPTO_ALGAPI
919 select CRYPTO_CRYPTD 918 select CRYPTO_CRYPTD
920 select CRYPTO_ABLK_HELPER_X86 919 select CRYPTO_ABLK_HELPER
921 select CRYPTO_GLUE_HELPER_X86 920 select CRYPTO_GLUE_HELPER_X86
922 select CRYPTO_CAMELLIA_X86_64 921 select CRYPTO_CAMELLIA_X86_64
923 select CRYPTO_CAMELLIA_AESNI_AVX_X86_64 922 select CRYPTO_CAMELLIA_AESNI_AVX_X86_64
@@ -969,7 +968,7 @@ config CRYPTO_CAST5_AVX_X86_64
969 depends on X86 && 64BIT 968 depends on X86 && 64BIT
970 select CRYPTO_ALGAPI 969 select CRYPTO_ALGAPI
971 select CRYPTO_CRYPTD 970 select CRYPTO_CRYPTD
972 select CRYPTO_ABLK_HELPER_X86 971 select CRYPTO_ABLK_HELPER
973 select CRYPTO_CAST_COMMON 972 select CRYPTO_CAST_COMMON
974 select CRYPTO_CAST5 973 select CRYPTO_CAST5
975 help 974 help
@@ -992,7 +991,7 @@ config CRYPTO_CAST6_AVX_X86_64
992 depends on X86 && 64BIT 991 depends on X86 && 64BIT
993 select CRYPTO_ALGAPI 992 select CRYPTO_ALGAPI
994 select CRYPTO_CRYPTD 993 select CRYPTO_CRYPTD
995 select CRYPTO_ABLK_HELPER_X86 994 select CRYPTO_ABLK_HELPER
996 select CRYPTO_GLUE_HELPER_X86 995 select CRYPTO_GLUE_HELPER_X86
997 select CRYPTO_CAST_COMMON 996 select CRYPTO_CAST_COMMON
998 select CRYPTO_CAST6 997 select CRYPTO_CAST6
@@ -1110,7 +1109,7 @@ config CRYPTO_SERPENT_SSE2_X86_64
1110 depends on X86 && 64BIT 1109 depends on X86 && 64BIT
1111 select CRYPTO_ALGAPI 1110 select CRYPTO_ALGAPI
1112 select CRYPTO_CRYPTD 1111 select CRYPTO_CRYPTD
1113 select CRYPTO_ABLK_HELPER_X86 1112 select CRYPTO_ABLK_HELPER
1114 select CRYPTO_GLUE_HELPER_X86 1113 select CRYPTO_GLUE_HELPER_X86
1115 select CRYPTO_SERPENT 1114 select CRYPTO_SERPENT
1116 select CRYPTO_LRW 1115 select CRYPTO_LRW
@@ -1132,7 +1131,7 @@ config CRYPTO_SERPENT_SSE2_586
1132 depends on X86 && !64BIT 1131 depends on X86 && !64BIT
1133 select CRYPTO_ALGAPI 1132 select CRYPTO_ALGAPI
1134 select CRYPTO_CRYPTD 1133 select CRYPTO_CRYPTD
1135 select CRYPTO_ABLK_HELPER_X86 1134 select CRYPTO_ABLK_HELPER
1136 select CRYPTO_GLUE_HELPER_X86 1135 select CRYPTO_GLUE_HELPER_X86
1137 select CRYPTO_SERPENT 1136 select CRYPTO_SERPENT
1138 select CRYPTO_LRW 1137 select CRYPTO_LRW
@@ -1154,7 +1153,7 @@ config CRYPTO_SERPENT_AVX_X86_64
1154 depends on X86 && 64BIT 1153 depends on X86 && 64BIT
1155 select CRYPTO_ALGAPI 1154 select CRYPTO_ALGAPI
1156 select CRYPTO_CRYPTD 1155 select CRYPTO_CRYPTD
1157 select CRYPTO_ABLK_HELPER_X86 1156 select CRYPTO_ABLK_HELPER
1158 select CRYPTO_GLUE_HELPER_X86 1157 select CRYPTO_GLUE_HELPER_X86
1159 select CRYPTO_SERPENT 1158 select CRYPTO_SERPENT
1160 select CRYPTO_LRW 1159 select CRYPTO_LRW
@@ -1176,7 +1175,7 @@ config CRYPTO_SERPENT_AVX2_X86_64
1176 depends on X86 && 64BIT 1175 depends on X86 && 64BIT
1177 select CRYPTO_ALGAPI 1176 select CRYPTO_ALGAPI
1178 select CRYPTO_CRYPTD 1177 select CRYPTO_CRYPTD
1179 select CRYPTO_ABLK_HELPER_X86 1178 select CRYPTO_ABLK_HELPER
1180 select CRYPTO_GLUE_HELPER_X86 1179 select CRYPTO_GLUE_HELPER_X86
1181 select CRYPTO_SERPENT 1180 select CRYPTO_SERPENT
1182 select CRYPTO_SERPENT_AVX_X86_64 1181 select CRYPTO_SERPENT_AVX_X86_64
@@ -1292,7 +1291,7 @@ config CRYPTO_TWOFISH_AVX_X86_64
1292 depends on X86 && 64BIT 1291 depends on X86 && 64BIT
1293 select CRYPTO_ALGAPI 1292 select CRYPTO_ALGAPI
1294 select CRYPTO_CRYPTD 1293 select CRYPTO_CRYPTD
1295 select CRYPTO_ABLK_HELPER_X86 1294 select CRYPTO_ABLK_HELPER
1296 select CRYPTO_GLUE_HELPER_X86 1295 select CRYPTO_GLUE_HELPER_X86
1297 select CRYPTO_TWOFISH_COMMON 1296 select CRYPTO_TWOFISH_COMMON
1298 select CRYPTO_TWOFISH_X86_64 1297 select CRYPTO_TWOFISH_X86_64
@@ -1402,6 +1401,9 @@ config CRYPTO_USER_API_SKCIPHER
1402 This option enables the user-spaces interface for symmetric 1401 This option enables the user-spaces interface for symmetric
1403 key cipher algorithms. 1402 key cipher algorithms.
1404 1403
1404config CRYPTO_HASH_INFO
1405 bool
1406
1405source "drivers/crypto/Kconfig" 1407source "drivers/crypto/Kconfig"
1406source crypto/asymmetric_keys/Kconfig 1408source crypto/asymmetric_keys/Kconfig
1407 1409
diff --git a/crypto/Makefile b/crypto/Makefile
index 80019ba8da3a..989c510da8cc 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -2,8 +2,13 @@
2# Cryptographic API 2# Cryptographic API
3# 3#
4 4
5# memneq MUST be built with -Os or -O0 to prevent early-return optimizations
6# that will defeat memneq's actual purpose to prevent timing attacks.
7CFLAGS_REMOVE_memneq.o := -O1 -O2 -O3
8CFLAGS_memneq.o := -Os
9
5obj-$(CONFIG_CRYPTO) += crypto.o 10obj-$(CONFIG_CRYPTO) += crypto.o
6crypto-y := api.o cipher.o compress.o 11crypto-y := api.o cipher.o compress.o memneq.o
7 12
8obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o 13obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
9 14
@@ -104,3 +109,5 @@ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
104obj-$(CONFIG_XOR_BLOCKS) += xor.o 109obj-$(CONFIG_XOR_BLOCKS) += xor.o
105obj-$(CONFIG_ASYNC_CORE) += async_tx/ 110obj-$(CONFIG_ASYNC_CORE) += async_tx/
106obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/ 111obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
112obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o
113obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
diff --git a/arch/x86/crypto/ablk_helper.c b/crypto/ablk_helper.c
index 43282fe04a8b..ffe7278d4bd8 100644
--- a/arch/x86/crypto/ablk_helper.c
+++ b/crypto/ablk_helper.c
@@ -28,10 +28,11 @@
28#include <linux/crypto.h> 28#include <linux/crypto.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/hardirq.h>
31#include <crypto/algapi.h> 32#include <crypto/algapi.h>
32#include <crypto/cryptd.h> 33#include <crypto/cryptd.h>
33#include <asm/i387.h> 34#include <crypto/ablk_helper.h>
34#include <asm/crypto/ablk_helper.h> 35#include <asm/simd.h>
35 36
36int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, 37int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
37 unsigned int key_len) 38 unsigned int key_len)
@@ -70,11 +71,11 @@ int ablk_encrypt(struct ablkcipher_request *req)
70 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 71 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
71 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); 72 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
72 73
73 if (!irq_fpu_usable()) { 74 if (!may_use_simd()) {
74 struct ablkcipher_request *cryptd_req = 75 struct ablkcipher_request *cryptd_req =
75 ablkcipher_request_ctx(req); 76 ablkcipher_request_ctx(req);
76 77
77 memcpy(cryptd_req, req, sizeof(*req)); 78 *cryptd_req = *req;
78 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); 79 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
79 80
80 return crypto_ablkcipher_encrypt(cryptd_req); 81 return crypto_ablkcipher_encrypt(cryptd_req);
@@ -89,11 +90,11 @@ int ablk_decrypt(struct ablkcipher_request *req)
89 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 90 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
90 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); 91 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
91 92
92 if (!irq_fpu_usable()) { 93 if (!may_use_simd()) {
93 struct ablkcipher_request *cryptd_req = 94 struct ablkcipher_request *cryptd_req =
94 ablkcipher_request_ctx(req); 95 ablkcipher_request_ctx(req);
95 96
96 memcpy(cryptd_req, req, sizeof(*req)); 97 *cryptd_req = *req;
97 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); 98 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
98 99
99 return crypto_ablkcipher_decrypt(cryptd_req); 100 return crypto_ablkcipher_decrypt(cryptd_req);
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 7d4a8d28277e..40886c489903 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -16,9 +16,7 @@
16#include <crypto/internal/skcipher.h> 16#include <crypto/internal/skcipher.h>
17#include <linux/cpumask.h> 17#include <linux/cpumask.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/init.h>
20#include <linux/kernel.h> 19#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
23#include <linux/sched.h> 21#include <linux/sched.h>
24#include <linux/slab.h> 22#include <linux/slab.h>
@@ -30,8 +28,6 @@
30 28
31#include "internal.h" 29#include "internal.h"
32 30
33static const char *skcipher_default_geniv __read_mostly;
34
35struct ablkcipher_buffer { 31struct ablkcipher_buffer {
36 struct list_head entry; 32 struct list_head entry;
37 struct scatter_walk dst; 33 struct scatter_walk dst;
@@ -527,8 +523,7 @@ const char *crypto_default_geniv(const struct crypto_alg *alg)
527 alg->cra_blocksize) 523 alg->cra_blocksize)
528 return "chainiv"; 524 return "chainiv";
529 525
530 return alg->cra_flags & CRYPTO_ALG_ASYNC ? 526 return "eseqiv";
531 "eseqiv" : skcipher_default_geniv;
532} 527}
533 528
534static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) 529static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
@@ -709,17 +704,3 @@ err:
709 return ERR_PTR(err); 704 return ERR_PTR(err);
710} 705}
711EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher); 706EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
712
713static int __init skcipher_module_init(void)
714{
715 skcipher_default_geniv = num_possible_cpus() > 1 ?
716 "eseqiv" : "chainiv";
717 return 0;
718}
719
720static void skcipher_module_exit(void)
721{
722}
723
724module_init(skcipher_module_init);
725module_exit(skcipher_module_exit);
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 0262210cad38..850246206b12 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -114,6 +114,9 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
114 struct hash_ctx *ctx = ask->private; 114 struct hash_ctx *ctx = ask->private;
115 int err; 115 int err;
116 116
117 if (flags & MSG_SENDPAGE_NOTLAST)
118 flags |= MSG_MORE;
119
117 lock_sock(sk); 120 lock_sock(sk);
118 sg_init_table(ctx->sgl.sg, 1); 121 sg_init_table(ctx->sgl.sg, 1);
119 sg_set_page(ctx->sgl.sg, page, size, offset); 122 sg_set_page(ctx->sgl.sg, page, size, offset);
@@ -161,8 +164,6 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
161 else if (len < ds) 164 else if (len < ds)
162 msg->msg_flags |= MSG_TRUNC; 165 msg->msg_flags |= MSG_TRUNC;
163 166
164 msg->msg_namelen = 0;
165
166 lock_sock(sk); 167 lock_sock(sk);
167 if (ctx->more) { 168 if (ctx->more) {
168 ctx->more = 0; 169 ctx->more = 0;
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index a1c4f0a55583..a19c027b29bd 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -378,6 +378,9 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
378 struct skcipher_sg_list *sgl; 378 struct skcipher_sg_list *sgl;
379 int err = -EINVAL; 379 int err = -EINVAL;
380 380
381 if (flags & MSG_SENDPAGE_NOTLAST)
382 flags |= MSG_MORE;
383
381 lock_sock(sk); 384 lock_sock(sk);
382 if (!ctx->more && ctx->used) 385 if (!ctx->more && ctx->used)
383 goto unlock; 386 goto unlock;
@@ -432,7 +435,6 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
432 long copied = 0; 435 long copied = 0;
433 436
434 lock_sock(sk); 437 lock_sock(sk);
435 msg->msg_namelen = 0;
436 for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0; 438 for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
437 iovlen--, iov++) { 439 iovlen--, iov++) {
438 unsigned long seglen = iov->iov_len; 440 unsigned long seglen = iov->iov_len;
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index c0bb3778f1ae..666f1962a160 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -230,11 +230,11 @@ remainder:
230 */ 230 */
231 if (byte_count < DEFAULT_BLK_SZ) { 231 if (byte_count < DEFAULT_BLK_SZ) {
232empty_rbuf: 232empty_rbuf:
233 for (; ctx->rand_data_valid < DEFAULT_BLK_SZ; 233 while (ctx->rand_data_valid < DEFAULT_BLK_SZ) {
234 ctx->rand_data_valid++) {
235 *ptr = ctx->rand_data[ctx->rand_data_valid]; 234 *ptr = ctx->rand_data[ctx->rand_data_valid];
236 ptr++; 235 ptr++;
237 byte_count--; 236 byte_count--;
237 ctx->rand_data_valid++;
238 if (byte_count == 0) 238 if (byte_count == 0)
239 goto done; 239 goto done;
240 } 240 }
diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
index 6d2c2ea12559..03a6eb95ab50 100644
--- a/crypto/asymmetric_keys/Kconfig
+++ b/crypto/asymmetric_keys/Kconfig
@@ -12,6 +12,8 @@ if ASYMMETRIC_KEY_TYPE
12config ASYMMETRIC_PUBLIC_KEY_SUBTYPE 12config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
13 tristate "Asymmetric public-key crypto algorithm subtype" 13 tristate "Asymmetric public-key crypto algorithm subtype"
14 select MPILIB 14 select MPILIB
15 select PUBLIC_KEY_ALGO_RSA
16 select CRYPTO_HASH_INFO
15 help 17 help
16 This option provides support for asymmetric public key type handling. 18 This option provides support for asymmetric public key type handling.
17 If signature generation and/or verification are to be used, 19 If signature generation and/or verification are to be used,
@@ -20,8 +22,8 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
20 22
21config PUBLIC_KEY_ALGO_RSA 23config PUBLIC_KEY_ALGO_RSA
22 tristate "RSA public-key algorithm" 24 tristate "RSA public-key algorithm"
23 depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
24 select MPILIB_EXTRA 25 select MPILIB_EXTRA
26 select MPILIB
25 help 27 help
26 This option enables support for the RSA algorithm (PKCS#1, RFC3447). 28 This option enables support for the RSA algorithm (PKCS#1, RFC3447).
27 29
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index cf807654d221..b77eb5304788 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -209,6 +209,7 @@ struct key_type key_type_asymmetric = {
209 .match = asymmetric_key_match, 209 .match = asymmetric_key_match,
210 .destroy = asymmetric_key_destroy, 210 .destroy = asymmetric_key_destroy,
211 .describe = asymmetric_key_describe, 211 .describe = asymmetric_key_describe,
212 .def_lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE,
212}; 213};
213EXPORT_SYMBOL_GPL(key_type_asymmetric); 214EXPORT_SYMBOL_GPL(key_type_asymmetric);
214 215
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index cb2e29180a87..97eb001960b9 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -22,29 +22,25 @@
22 22
23MODULE_LICENSE("GPL"); 23MODULE_LICENSE("GPL");
24 24
25const char *const pkey_algo[PKEY_ALGO__LAST] = { 25const char *const pkey_algo_name[PKEY_ALGO__LAST] = {
26 [PKEY_ALGO_DSA] = "DSA", 26 [PKEY_ALGO_DSA] = "DSA",
27 [PKEY_ALGO_RSA] = "RSA", 27 [PKEY_ALGO_RSA] = "RSA",
28}; 28};
29EXPORT_SYMBOL_GPL(pkey_algo); 29EXPORT_SYMBOL_GPL(pkey_algo_name);
30 30
31const char *const pkey_hash_algo[PKEY_HASH__LAST] = { 31const struct public_key_algorithm *pkey_algo[PKEY_ALGO__LAST] = {
32 [PKEY_HASH_MD4] = "md4", 32#if defined(CONFIG_PUBLIC_KEY_ALGO_RSA) || \
33 [PKEY_HASH_MD5] = "md5", 33 defined(CONFIG_PUBLIC_KEY_ALGO_RSA_MODULE)
34 [PKEY_HASH_SHA1] = "sha1", 34 [PKEY_ALGO_RSA] = &RSA_public_key_algorithm,
35 [PKEY_HASH_RIPE_MD_160] = "rmd160", 35#endif
36 [PKEY_HASH_SHA256] = "sha256",
37 [PKEY_HASH_SHA384] = "sha384",
38 [PKEY_HASH_SHA512] = "sha512",
39 [PKEY_HASH_SHA224] = "sha224",
40}; 36};
41EXPORT_SYMBOL_GPL(pkey_hash_algo); 37EXPORT_SYMBOL_GPL(pkey_algo);
42 38
43const char *const pkey_id_type[PKEY_ID_TYPE__LAST] = { 39const char *const pkey_id_type_name[PKEY_ID_TYPE__LAST] = {
44 [PKEY_ID_PGP] = "PGP", 40 [PKEY_ID_PGP] = "PGP",
45 [PKEY_ID_X509] = "X509", 41 [PKEY_ID_X509] = "X509",
46}; 42};
47EXPORT_SYMBOL_GPL(pkey_id_type); 43EXPORT_SYMBOL_GPL(pkey_id_type_name);
48 44
49/* 45/*
50 * Provide a part of a description of the key for /proc/keys. 46 * Provide a part of a description of the key for /proc/keys.
@@ -56,7 +52,7 @@ static void public_key_describe(const struct key *asymmetric_key,
56 52
57 if (key) 53 if (key)
58 seq_printf(m, "%s.%s", 54 seq_printf(m, "%s.%s",
59 pkey_id_type[key->id_type], key->algo->name); 55 pkey_id_type_name[key->id_type], key->algo->name);
60} 56}
61 57
62/* 58/*
@@ -78,21 +74,45 @@ EXPORT_SYMBOL_GPL(public_key_destroy);
78/* 74/*
79 * Verify a signature using a public key. 75 * Verify a signature using a public key.
80 */ 76 */
81static int public_key_verify_signature(const struct key *key, 77int public_key_verify_signature(const struct public_key *pk,
82 const struct public_key_signature *sig) 78 const struct public_key_signature *sig)
83{ 79{
84 const struct public_key *pk = key->payload.data; 80 const struct public_key_algorithm *algo;
81
82 BUG_ON(!pk);
83 BUG_ON(!pk->mpi[0]);
84 BUG_ON(!pk->mpi[1]);
85 BUG_ON(!sig);
86 BUG_ON(!sig->digest);
87 BUG_ON(!sig->mpi[0]);
88
89 algo = pk->algo;
90 if (!algo) {
91 if (pk->pkey_algo >= PKEY_ALGO__LAST)
92 return -ENOPKG;
93 algo = pkey_algo[pk->pkey_algo];
94 if (!algo)
95 return -ENOPKG;
96 }
85 97
86 if (!pk->algo->verify_signature) 98 if (!algo->verify_signature)
87 return -ENOTSUPP; 99 return -ENOTSUPP;
88 100
89 if (sig->nr_mpi != pk->algo->n_sig_mpi) { 101 if (sig->nr_mpi != algo->n_sig_mpi) {
90 pr_debug("Signature has %u MPI not %u\n", 102 pr_debug("Signature has %u MPI not %u\n",
91 sig->nr_mpi, pk->algo->n_sig_mpi); 103 sig->nr_mpi, algo->n_sig_mpi);
92 return -EINVAL; 104 return -EINVAL;
93 } 105 }
94 106
95 return pk->algo->verify_signature(pk, sig); 107 return algo->verify_signature(pk, sig);
108}
109EXPORT_SYMBOL_GPL(public_key_verify_signature);
110
111static int public_key_verify_signature_2(const struct key *key,
112 const struct public_key_signature *sig)
113{
114 const struct public_key *pk = key->payload.data;
115 return public_key_verify_signature(pk, sig);
96} 116}
97 117
98/* 118/*
@@ -103,6 +123,6 @@ struct asymmetric_key_subtype public_key_subtype = {
103 .name = "public_key", 123 .name = "public_key",
104 .describe = public_key_describe, 124 .describe = public_key_describe,
105 .destroy = public_key_destroy, 125 .destroy = public_key_destroy,
106 .verify_signature = public_key_verify_signature, 126 .verify_signature = public_key_verify_signature_2,
107}; 127};
108EXPORT_SYMBOL_GPL(public_key_subtype); 128EXPORT_SYMBOL_GPL(public_key_subtype);
diff --git a/crypto/asymmetric_keys/public_key.h b/crypto/asymmetric_keys/public_key.h
index 5e5e35626899..5c37a22a0637 100644
--- a/crypto/asymmetric_keys/public_key.h
+++ b/crypto/asymmetric_keys/public_key.h
@@ -28,3 +28,9 @@ struct public_key_algorithm {
28}; 28};
29 29
30extern const struct public_key_algorithm RSA_public_key_algorithm; 30extern const struct public_key_algorithm RSA_public_key_algorithm;
31
32/*
33 * public_key.c
34 */
35extern int public_key_verify_signature(const struct public_key *pk,
36 const struct public_key_signature *sig);
diff --git a/crypto/asymmetric_keys/rsa.c b/crypto/asymmetric_keys/rsa.c
index 4a6a0696f8a3..459cf97a75e2 100644
--- a/crypto/asymmetric_keys/rsa.c
+++ b/crypto/asymmetric_keys/rsa.c
@@ -13,6 +13,7 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <crypto/algapi.h>
16#include "public_key.h" 17#include "public_key.h"
17 18
18MODULE_LICENSE("GPL"); 19MODULE_LICENSE("GPL");
@@ -73,13 +74,13 @@ static const struct {
73 size_t size; 74 size_t size;
74} RSA_ASN1_templates[PKEY_HASH__LAST] = { 75} RSA_ASN1_templates[PKEY_HASH__LAST] = {
75#define _(X) { RSA_digest_info_##X, sizeof(RSA_digest_info_##X) } 76#define _(X) { RSA_digest_info_##X, sizeof(RSA_digest_info_##X) }
76 [PKEY_HASH_MD5] = _(MD5), 77 [HASH_ALGO_MD5] = _(MD5),
77 [PKEY_HASH_SHA1] = _(SHA1), 78 [HASH_ALGO_SHA1] = _(SHA1),
78 [PKEY_HASH_RIPE_MD_160] = _(RIPE_MD_160), 79 [HASH_ALGO_RIPE_MD_160] = _(RIPE_MD_160),
79 [PKEY_HASH_SHA256] = _(SHA256), 80 [HASH_ALGO_SHA256] = _(SHA256),
80 [PKEY_HASH_SHA384] = _(SHA384), 81 [HASH_ALGO_SHA384] = _(SHA384),
81 [PKEY_HASH_SHA512] = _(SHA512), 82 [HASH_ALGO_SHA512] = _(SHA512),
82 [PKEY_HASH_SHA224] = _(SHA224), 83 [HASH_ALGO_SHA224] = _(SHA224),
83#undef _ 84#undef _
84}; 85};
85 86
@@ -189,12 +190,12 @@ static int RSA_verify(const u8 *H, const u8 *EM, size_t k, size_t hash_size,
189 } 190 }
190 } 191 }
191 192
192 if (memcmp(asn1_template, EM + T_offset, asn1_size) != 0) { 193 if (crypto_memneq(asn1_template, EM + T_offset, asn1_size) != 0) {
193 kleave(" = -EBADMSG [EM[T] ASN.1 mismatch]"); 194 kleave(" = -EBADMSG [EM[T] ASN.1 mismatch]");
194 return -EBADMSG; 195 return -EBADMSG;
195 } 196 }
196 197
197 if (memcmp(H, EM + T_offset + asn1_size, hash_size) != 0) { 198 if (crypto_memneq(H, EM + T_offset + asn1_size, hash_size) != 0) {
198 kleave(" = -EKEYREJECTED [EM[T] hash mismatch]"); 199 kleave(" = -EKEYREJECTED [EM[T] hash mismatch]");
199 return -EKEYREJECTED; 200 return -EKEYREJECTED;
200 } 201 }
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index facbf26bc6bb..29893162497c 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -47,6 +47,8 @@ void x509_free_certificate(struct x509_certificate *cert)
47 kfree(cert->subject); 47 kfree(cert->subject);
48 kfree(cert->fingerprint); 48 kfree(cert->fingerprint);
49 kfree(cert->authority); 49 kfree(cert->authority);
50 kfree(cert->sig.digest);
51 mpi_free(cert->sig.rsa.s);
50 kfree(cert); 52 kfree(cert);
51 } 53 }
52} 54}
@@ -152,33 +154,33 @@ int x509_note_pkey_algo(void *context, size_t hdrlen,
152 return -ENOPKG; /* Unsupported combination */ 154 return -ENOPKG; /* Unsupported combination */
153 155
154 case OID_md4WithRSAEncryption: 156 case OID_md4WithRSAEncryption:
155 ctx->cert->sig_hash_algo = PKEY_HASH_MD5; 157 ctx->cert->sig.pkey_hash_algo = HASH_ALGO_MD5;
156 ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; 158 ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
157 break; 159 break;
158 160
159 case OID_sha1WithRSAEncryption: 161 case OID_sha1WithRSAEncryption:
160 ctx->cert->sig_hash_algo = PKEY_HASH_SHA1; 162 ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA1;
161 ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; 163 ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
162 break; 164 break;
163 165
164 case OID_sha256WithRSAEncryption: 166 case OID_sha256WithRSAEncryption:
165 ctx->cert->sig_hash_algo = PKEY_HASH_SHA256; 167 ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA256;
166 ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; 168 ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
167 break; 169 break;
168 170
169 case OID_sha384WithRSAEncryption: 171 case OID_sha384WithRSAEncryption:
170 ctx->cert->sig_hash_algo = PKEY_HASH_SHA384; 172 ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA384;
171 ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; 173 ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
172 break; 174 break;
173 175
174 case OID_sha512WithRSAEncryption: 176 case OID_sha512WithRSAEncryption:
175 ctx->cert->sig_hash_algo = PKEY_HASH_SHA512; 177 ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA512;
176 ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; 178 ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
177 break; 179 break;
178 180
179 case OID_sha224WithRSAEncryption: 181 case OID_sha224WithRSAEncryption:
180 ctx->cert->sig_hash_algo = PKEY_HASH_SHA224; 182 ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA224;
181 ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; 183 ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
182 break; 184 break;
183 } 185 }
184 186
@@ -203,8 +205,8 @@ int x509_note_signature(void *context, size_t hdrlen,
203 return -EINVAL; 205 return -EINVAL;
204 } 206 }
205 207
206 ctx->cert->sig = value; 208 ctx->cert->raw_sig = value;
207 ctx->cert->sig_size = vlen; 209 ctx->cert->raw_sig_size = vlen;
208 return 0; 210 return 0;
209} 211}
210 212
@@ -343,8 +345,9 @@ int x509_extract_key_data(void *context, size_t hdrlen,
343 if (ctx->last_oid != OID_rsaEncryption) 345 if (ctx->last_oid != OID_rsaEncryption)
344 return -ENOPKG; 346 return -ENOPKG;
345 347
346 /* There seems to be an extraneous 0 byte on the front of the data */ 348 ctx->cert->pub->pkey_algo = PKEY_ALGO_RSA;
347 ctx->cert->pkey_algo = PKEY_ALGO_RSA; 349
350 /* Discard the BIT STRING metadata */
348 ctx->key = value + 1; 351 ctx->key = value + 1;
349 ctx->key_size = vlen - 1; 352 ctx->key_size = vlen - 1;
350 return 0; 353 return 0;
diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h
index f86dc5fcc4ad..87d9cc26f630 100644
--- a/crypto/asymmetric_keys/x509_parser.h
+++ b/crypto/asymmetric_keys/x509_parser.h
@@ -9,6 +9,7 @@
9 * 2 of the Licence, or (at your option) any later version. 9 * 2 of the Licence, or (at your option) any later version.
10 */ 10 */
11 11
12#include <linux/time.h>
12#include <crypto/public_key.h> 13#include <crypto/public_key.h>
13 14
14struct x509_certificate { 15struct x509_certificate {
@@ -20,13 +21,11 @@ struct x509_certificate {
20 char *authority; /* Authority key fingerprint as hex */ 21 char *authority; /* Authority key fingerprint as hex */
21 struct tm valid_from; 22 struct tm valid_from;
22 struct tm valid_to; 23 struct tm valid_to;
23 enum pkey_algo pkey_algo : 8; /* Public key algorithm */
24 enum pkey_algo sig_pkey_algo : 8; /* Signature public key algorithm */
25 enum pkey_hash_algo sig_hash_algo : 8; /* Signature hash algorithm */
26 const void *tbs; /* Signed data */ 24 const void *tbs; /* Signed data */
27 size_t tbs_size; /* Size of signed data */ 25 unsigned tbs_size; /* Size of signed data */
28 const void *sig; /* Signature data */ 26 unsigned raw_sig_size; /* Size of sigature */
29 size_t sig_size; /* Size of sigature */ 27 const void *raw_sig; /* Signature data */
28 struct public_key_signature sig; /* Signature parameters */
30}; 29};
31 30
32/* 31/*
@@ -34,3 +33,10 @@ struct x509_certificate {
34 */ 33 */
35extern void x509_free_certificate(struct x509_certificate *cert); 34extern void x509_free_certificate(struct x509_certificate *cert);
36extern struct x509_certificate *x509_cert_parse(const void *data, size_t datalen); 35extern struct x509_certificate *x509_cert_parse(const void *data, size_t datalen);
36
37/*
38 * x509_public_key.c
39 */
40extern int x509_get_sig_params(struct x509_certificate *cert);
41extern int x509_check_signature(const struct public_key *pub,
42 struct x509_certificate *cert);
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 06007f0e880c..382ef0d2ff2e 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -23,82 +23,84 @@
23#include "public_key.h" 23#include "public_key.h"
24#include "x509_parser.h" 24#include "x509_parser.h"
25 25
26static const
27struct public_key_algorithm *x509_public_key_algorithms[PKEY_ALGO__LAST] = {
28 [PKEY_ALGO_DSA] = NULL,
29#if defined(CONFIG_PUBLIC_KEY_ALGO_RSA) || \
30 defined(CONFIG_PUBLIC_KEY_ALGO_RSA_MODULE)
31 [PKEY_ALGO_RSA] = &RSA_public_key_algorithm,
32#endif
33};
34
35/* 26/*
36 * Check the signature on a certificate using the provided public key 27 * Set up the signature parameters in an X.509 certificate. This involves
28 * digesting the signed data and extracting the signature.
37 */ 29 */
38static int x509_check_signature(const struct public_key *pub, 30int x509_get_sig_params(struct x509_certificate *cert)
39 const struct x509_certificate *cert)
40{ 31{
41 struct public_key_signature *sig;
42 struct crypto_shash *tfm; 32 struct crypto_shash *tfm;
43 struct shash_desc *desc; 33 struct shash_desc *desc;
44 size_t digest_size, desc_size; 34 size_t digest_size, desc_size;
35 void *digest;
45 int ret; 36 int ret;
46 37
47 pr_devel("==>%s()\n", __func__); 38 pr_devel("==>%s()\n", __func__);
48 39
40 if (cert->sig.rsa.s)
41 return 0;
42
43 cert->sig.rsa.s = mpi_read_raw_data(cert->raw_sig, cert->raw_sig_size);
44 if (!cert->sig.rsa.s)
45 return -ENOMEM;
46 cert->sig.nr_mpi = 1;
47
49 /* Allocate the hashing algorithm we're going to need and find out how 48 /* Allocate the hashing algorithm we're going to need and find out how
50 * big the hash operational data will be. 49 * big the hash operational data will be.
51 */ 50 */
52 tfm = crypto_alloc_shash(pkey_hash_algo[cert->sig_hash_algo], 0, 0); 51 tfm = crypto_alloc_shash(hash_algo_name[cert->sig.pkey_hash_algo], 0, 0);
53 if (IS_ERR(tfm)) 52 if (IS_ERR(tfm))
54 return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm); 53 return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm);
55 54
56 desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); 55 desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
57 digest_size = crypto_shash_digestsize(tfm); 56 digest_size = crypto_shash_digestsize(tfm);
58 57
59 /* We allocate the hash operational data storage on the end of our 58 /* We allocate the hash operational data storage on the end of the
60 * context data. 59 * digest storage space.
61 */ 60 */
62 ret = -ENOMEM; 61 ret = -ENOMEM;
63 sig = kzalloc(sizeof(*sig) + desc_size + digest_size, GFP_KERNEL); 62 digest = kzalloc(digest_size + desc_size, GFP_KERNEL);
64 if (!sig) 63 if (!digest)
65 goto error_no_sig; 64 goto error;
66 65
67 sig->pkey_hash_algo = cert->sig_hash_algo; 66 cert->sig.digest = digest;
68 sig->digest = (u8 *)sig + sizeof(*sig) + desc_size; 67 cert->sig.digest_size = digest_size;
69 sig->digest_size = digest_size;
70 68
71 desc = (void *)sig + sizeof(*sig); 69 desc = digest + digest_size;
72 desc->tfm = tfm; 70 desc->tfm = tfm;
73 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 71 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
74 72
75 ret = crypto_shash_init(desc); 73 ret = crypto_shash_init(desc);
76 if (ret < 0) 74 if (ret < 0)
77 goto error; 75 goto error;
76 might_sleep();
77 ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, digest);
78error:
79 crypto_free_shash(tfm);
80 pr_devel("<==%s() = %d\n", __func__, ret);
81 return ret;
82}
83EXPORT_SYMBOL_GPL(x509_get_sig_params);
78 84
79 ret = -ENOMEM; 85/*
80 sig->rsa.s = mpi_read_raw_data(cert->sig, cert->sig_size); 86 * Check the signature on a certificate using the provided public key
81 if (!sig->rsa.s) 87 */
82 goto error; 88int x509_check_signature(const struct public_key *pub,
89 struct x509_certificate *cert)
90{
91 int ret;
83 92
84 ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, sig->digest); 93 pr_devel("==>%s()\n", __func__);
85 if (ret < 0)
86 goto error_mpi;
87 94
88 ret = pub->algo->verify_signature(pub, sig); 95 ret = x509_get_sig_params(cert);
96 if (ret < 0)
97 return ret;
89 98
99 ret = public_key_verify_signature(pub, &cert->sig);
90 pr_debug("Cert Verification: %d\n", ret); 100 pr_debug("Cert Verification: %d\n", ret);
91
92error_mpi:
93 mpi_free(sig->rsa.s);
94error:
95 kfree(sig);
96error_no_sig:
97 crypto_free_shash(tfm);
98
99 pr_devel("<==%s() = %d\n", __func__, ret);
100 return ret; 101 return ret;
101} 102}
103EXPORT_SYMBOL_GPL(x509_check_signature);
102 104
103/* 105/*
104 * Attempt to parse a data blob for a key as an X509 certificate. 106 * Attempt to parse a data blob for a key as an X509 certificate.
@@ -106,7 +108,6 @@ error_no_sig:
106static int x509_key_preparse(struct key_preparsed_payload *prep) 108static int x509_key_preparse(struct key_preparsed_payload *prep)
107{ 109{
108 struct x509_certificate *cert; 110 struct x509_certificate *cert;
109 struct tm now;
110 size_t srlen, sulen; 111 size_t srlen, sulen;
111 char *desc = NULL; 112 char *desc = NULL;
112 int ret; 113 int ret;
@@ -117,7 +118,18 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
117 118
118 pr_devel("Cert Issuer: %s\n", cert->issuer); 119 pr_devel("Cert Issuer: %s\n", cert->issuer);
119 pr_devel("Cert Subject: %s\n", cert->subject); 120 pr_devel("Cert Subject: %s\n", cert->subject);
120 pr_devel("Cert Key Algo: %s\n", pkey_algo[cert->pkey_algo]); 121
122 if (cert->pub->pkey_algo >= PKEY_ALGO__LAST ||
123 cert->sig.pkey_algo >= PKEY_ALGO__LAST ||
124 cert->sig.pkey_hash_algo >= PKEY_HASH__LAST ||
125 !pkey_algo[cert->pub->pkey_algo] ||
126 !pkey_algo[cert->sig.pkey_algo] ||
127 !hash_algo_name[cert->sig.pkey_hash_algo]) {
128 ret = -ENOPKG;
129 goto error_free_cert;
130 }
131
132 pr_devel("Cert Key Algo: %s\n", pkey_algo_name[cert->pub->pkey_algo]);
121 pr_devel("Cert Valid From: %04ld-%02d-%02d %02d:%02d:%02d\n", 133 pr_devel("Cert Valid From: %04ld-%02d-%02d %02d:%02d:%02d\n",
122 cert->valid_from.tm_year + 1900, cert->valid_from.tm_mon + 1, 134 cert->valid_from.tm_year + 1900, cert->valid_from.tm_mon + 1,
123 cert->valid_from.tm_mday, cert->valid_from.tm_hour, 135 cert->valid_from.tm_mday, cert->valid_from.tm_hour,
@@ -127,58 +139,22 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
127 cert->valid_to.tm_mday, cert->valid_to.tm_hour, 139 cert->valid_to.tm_mday, cert->valid_to.tm_hour,
128 cert->valid_to.tm_min, cert->valid_to.tm_sec); 140 cert->valid_to.tm_min, cert->valid_to.tm_sec);
129 pr_devel("Cert Signature: %s + %s\n", 141 pr_devel("Cert Signature: %s + %s\n",
130 pkey_algo[cert->sig_pkey_algo], 142 pkey_algo_name[cert->sig.pkey_algo],
131 pkey_hash_algo[cert->sig_hash_algo]); 143 hash_algo_name[cert->sig.pkey_hash_algo]);
132 144
133 if (!cert->fingerprint || !cert->authority) { 145 if (!cert->fingerprint) {
134 pr_warn("Cert for '%s' must have SubjKeyId and AuthKeyId extensions\n", 146 pr_warn("Cert for '%s' must have a SubjKeyId extension\n",
135 cert->subject); 147 cert->subject);
136 ret = -EKEYREJECTED; 148 ret = -EKEYREJECTED;
137 goto error_free_cert; 149 goto error_free_cert;
138 } 150 }
139 151
140 time_to_tm(CURRENT_TIME.tv_sec, 0, &now); 152 cert->pub->algo = pkey_algo[cert->pub->pkey_algo];
141 pr_devel("Now: %04ld-%02d-%02d %02d:%02d:%02d\n",
142 now.tm_year + 1900, now.tm_mon + 1, now.tm_mday,
143 now.tm_hour, now.tm_min, now.tm_sec);
144 if (now.tm_year < cert->valid_from.tm_year ||
145 (now.tm_year == cert->valid_from.tm_year &&
146 (now.tm_mon < cert->valid_from.tm_mon ||
147 (now.tm_mon == cert->valid_from.tm_mon &&
148 (now.tm_mday < cert->valid_from.tm_mday ||
149 (now.tm_mday == cert->valid_from.tm_mday &&
150 (now.tm_hour < cert->valid_from.tm_hour ||
151 (now.tm_hour == cert->valid_from.tm_hour &&
152 (now.tm_min < cert->valid_from.tm_min ||
153 (now.tm_min == cert->valid_from.tm_min &&
154 (now.tm_sec < cert->valid_from.tm_sec
155 ))))))))))) {
156 pr_warn("Cert %s is not yet valid\n", cert->fingerprint);
157 ret = -EKEYREJECTED;
158 goto error_free_cert;
159 }
160 if (now.tm_year > cert->valid_to.tm_year ||
161 (now.tm_year == cert->valid_to.tm_year &&
162 (now.tm_mon > cert->valid_to.tm_mon ||
163 (now.tm_mon == cert->valid_to.tm_mon &&
164 (now.tm_mday > cert->valid_to.tm_mday ||
165 (now.tm_mday == cert->valid_to.tm_mday &&
166 (now.tm_hour > cert->valid_to.tm_hour ||
167 (now.tm_hour == cert->valid_to.tm_hour &&
168 (now.tm_min > cert->valid_to.tm_min ||
169 (now.tm_min == cert->valid_to.tm_min &&
170 (now.tm_sec > cert->valid_to.tm_sec
171 ))))))))))) {
172 pr_warn("Cert %s has expired\n", cert->fingerprint);
173 ret = -EKEYEXPIRED;
174 goto error_free_cert;
175 }
176
177 cert->pub->algo = x509_public_key_algorithms[cert->pkey_algo];
178 cert->pub->id_type = PKEY_ID_X509; 153 cert->pub->id_type = PKEY_ID_X509;
179 154
180 /* Check the signature on the key */ 155 /* Check the signature on the key if it appears to be self-signed */
181 if (strcmp(cert->fingerprint, cert->authority) == 0) { 156 if (!cert->authority ||
157 strcmp(cert->fingerprint, cert->authority) == 0) {
182 ret = x509_check_signature(cert->pub, cert); 158 ret = x509_check_signature(cert->pub, cert);
183 if (ret < 0) 159 if (ret < 0)
184 goto error_free_cert; 160 goto error_free_cert;
@@ -237,3 +213,6 @@ static void __exit x509_key_exit(void)
237 213
238module_init(x509_key_init); 214module_init(x509_key_init);
239module_exit(x509_key_exit); 215module_exit(x509_key_exit);
216
217MODULE_DESCRIPTION("X.509 certificate parser");
218MODULE_LICENSE("GPL");
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 9e62feffb374..f8c0b8dbeb75 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -50,33 +50,36 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
50 &dest, 1, &src, 1, len); 50 &dest, 1, &src, 1, len);
51 struct dma_device *device = chan ? chan->device : NULL; 51 struct dma_device *device = chan ? chan->device : NULL;
52 struct dma_async_tx_descriptor *tx = NULL; 52 struct dma_async_tx_descriptor *tx = NULL;
53 struct dmaengine_unmap_data *unmap = NULL;
53 54
54 if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { 55 if (device)
55 dma_addr_t dma_dest, dma_src; 56 unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
57
58 if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
56 unsigned long dma_prep_flags = 0; 59 unsigned long dma_prep_flags = 0;
57 60
58 if (submit->cb_fn) 61 if (submit->cb_fn)
59 dma_prep_flags |= DMA_PREP_INTERRUPT; 62 dma_prep_flags |= DMA_PREP_INTERRUPT;
60 if (submit->flags & ASYNC_TX_FENCE) 63 if (submit->flags & ASYNC_TX_FENCE)
61 dma_prep_flags |= DMA_PREP_FENCE; 64 dma_prep_flags |= DMA_PREP_FENCE;
62 dma_dest = dma_map_page(device->dev, dest, dest_offset, len, 65
63 DMA_FROM_DEVICE); 66 unmap->to_cnt = 1;
64 67 unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len,
65 dma_src = dma_map_page(device->dev, src, src_offset, len, 68 DMA_TO_DEVICE);
66 DMA_TO_DEVICE); 69 unmap->from_cnt = 1;
67 70 unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len,
68 tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src, 71 DMA_FROM_DEVICE);
69 len, dma_prep_flags); 72 unmap->len = len;
70 if (!tx) { 73
71 dma_unmap_page(device->dev, dma_dest, len, 74 tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
72 DMA_FROM_DEVICE); 75 unmap->addr[0], len,
73 dma_unmap_page(device->dev, dma_src, len, 76 dma_prep_flags);
74 DMA_TO_DEVICE);
75 }
76 } 77 }
77 78
78 if (tx) { 79 if (tx) {
79 pr_debug("%s: (async) len: %zu\n", __func__, len); 80 pr_debug("%s: (async) len: %zu\n", __func__, len);
81
82 dma_set_unmap(tx, unmap);
80 async_tx_submit(chan, tx, submit); 83 async_tx_submit(chan, tx, submit);
81 } else { 84 } else {
82 void *dest_buf, *src_buf; 85 void *dest_buf, *src_buf;
@@ -96,6 +99,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
96 async_tx_sync_epilog(submit); 99 async_tx_sync_epilog(submit);
97 } 100 }
98 101
102 dmaengine_unmap_put(unmap);
103
99 return tx; 104 return tx;
100} 105}
101EXPORT_SYMBOL_GPL(async_memcpy); 106EXPORT_SYMBOL_GPL(async_memcpy);
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 91d5d385899e..d05327caf69d 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -46,49 +46,24 @@ static struct page *pq_scribble_page;
46 * do_async_gen_syndrome - asynchronously calculate P and/or Q 46 * do_async_gen_syndrome - asynchronously calculate P and/or Q
47 */ 47 */
48static __async_inline struct dma_async_tx_descriptor * 48static __async_inline struct dma_async_tx_descriptor *
49do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, 49do_async_gen_syndrome(struct dma_chan *chan,
50 const unsigned char *scfs, unsigned int offset, int disks, 50 const unsigned char *scfs, int disks,
51 size_t len, dma_addr_t *dma_src, 51 struct dmaengine_unmap_data *unmap,
52 enum dma_ctrl_flags dma_flags,
52 struct async_submit_ctl *submit) 53 struct async_submit_ctl *submit)
53{ 54{
54 struct dma_async_tx_descriptor *tx = NULL; 55 struct dma_async_tx_descriptor *tx = NULL;
55 struct dma_device *dma = chan->device; 56 struct dma_device *dma = chan->device;
56 enum dma_ctrl_flags dma_flags = 0;
57 enum async_tx_flags flags_orig = submit->flags; 57 enum async_tx_flags flags_orig = submit->flags;
58 dma_async_tx_callback cb_fn_orig = submit->cb_fn; 58 dma_async_tx_callback cb_fn_orig = submit->cb_fn;
59 dma_async_tx_callback cb_param_orig = submit->cb_param; 59 dma_async_tx_callback cb_param_orig = submit->cb_param;
60 int src_cnt = disks - 2; 60 int src_cnt = disks - 2;
61 unsigned char coefs[src_cnt];
62 unsigned short pq_src_cnt; 61 unsigned short pq_src_cnt;
63 dma_addr_t dma_dest[2]; 62 dma_addr_t dma_dest[2];
64 int src_off = 0; 63 int src_off = 0;
65 int idx;
66 int i;
67 64
68 /* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */ 65 if (submit->flags & ASYNC_TX_FENCE)
69 if (P(blocks, disks)) 66 dma_flags |= DMA_PREP_FENCE;
70 dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset,
71 len, DMA_BIDIRECTIONAL);
72 else
73 dma_flags |= DMA_PREP_PQ_DISABLE_P;
74 if (Q(blocks, disks))
75 dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset,
76 len, DMA_BIDIRECTIONAL);
77 else
78 dma_flags |= DMA_PREP_PQ_DISABLE_Q;
79
80 /* convert source addresses being careful to collapse 'empty'
81 * sources and update the coefficients accordingly
82 */
83 for (i = 0, idx = 0; i < src_cnt; i++) {
84 if (blocks[i] == NULL)
85 continue;
86 dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
87 DMA_TO_DEVICE);
88 coefs[idx] = scfs[i];
89 idx++;
90 }
91 src_cnt = idx;
92 67
93 while (src_cnt > 0) { 68 while (src_cnt > 0) {
94 submit->flags = flags_orig; 69 submit->flags = flags_orig;
@@ -100,28 +75,25 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
100 if (src_cnt > pq_src_cnt) { 75 if (src_cnt > pq_src_cnt) {
101 submit->flags &= ~ASYNC_TX_ACK; 76 submit->flags &= ~ASYNC_TX_ACK;
102 submit->flags |= ASYNC_TX_FENCE; 77 submit->flags |= ASYNC_TX_FENCE;
103 dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP;
104 submit->cb_fn = NULL; 78 submit->cb_fn = NULL;
105 submit->cb_param = NULL; 79 submit->cb_param = NULL;
106 } else { 80 } else {
107 dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP;
108 submit->cb_fn = cb_fn_orig; 81 submit->cb_fn = cb_fn_orig;
109 submit->cb_param = cb_param_orig; 82 submit->cb_param = cb_param_orig;
110 if (cb_fn_orig) 83 if (cb_fn_orig)
111 dma_flags |= DMA_PREP_INTERRUPT; 84 dma_flags |= DMA_PREP_INTERRUPT;
112 } 85 }
113 if (submit->flags & ASYNC_TX_FENCE)
114 dma_flags |= DMA_PREP_FENCE;
115 86
116 /* Since we have clobbered the src_list we are committed 87 /* Drivers force forward progress in case they can not provide
117 * to doing this asynchronously. Drivers force forward 88 * a descriptor
118 * progress in case they can not provide a descriptor
119 */ 89 */
120 for (;;) { 90 for (;;) {
91 dma_dest[0] = unmap->addr[disks - 2];
92 dma_dest[1] = unmap->addr[disks - 1];
121 tx = dma->device_prep_dma_pq(chan, dma_dest, 93 tx = dma->device_prep_dma_pq(chan, dma_dest,
122 &dma_src[src_off], 94 &unmap->addr[src_off],
123 pq_src_cnt, 95 pq_src_cnt,
124 &coefs[src_off], len, 96 &scfs[src_off], unmap->len,
125 dma_flags); 97 dma_flags);
126 if (likely(tx)) 98 if (likely(tx))
127 break; 99 break;
@@ -129,6 +101,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
129 dma_async_issue_pending(chan); 101 dma_async_issue_pending(chan);
130 } 102 }
131 103
104 dma_set_unmap(tx, unmap);
132 async_tx_submit(chan, tx, submit); 105 async_tx_submit(chan, tx, submit);
133 submit->depend_tx = tx; 106 submit->depend_tx = tx;
134 107
@@ -188,10 +161,6 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
188 * set to NULL those buffers will be replaced with the raid6_zero_page 161 * set to NULL those buffers will be replaced with the raid6_zero_page
189 * in the synchronous path and omitted in the hardware-asynchronous 162 * in the synchronous path and omitted in the hardware-asynchronous
190 * path. 163 * path.
191 *
192 * 'blocks' note: if submit->scribble is NULL then the contents of
193 * 'blocks' may be overwritten to perform address conversions
194 * (dma_map_page() or page_address()).
195 */ 164 */
196struct dma_async_tx_descriptor * 165struct dma_async_tx_descriptor *
197async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, 166async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@@ -202,26 +171,69 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
202 &P(blocks, disks), 2, 171 &P(blocks, disks), 2,
203 blocks, src_cnt, len); 172 blocks, src_cnt, len);
204 struct dma_device *device = chan ? chan->device : NULL; 173 struct dma_device *device = chan ? chan->device : NULL;
205 dma_addr_t *dma_src = NULL; 174 struct dmaengine_unmap_data *unmap = NULL;
206 175
207 BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks))); 176 BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
208 177
209 if (submit->scribble) 178 if (device)
210 dma_src = submit->scribble; 179 unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
211 else if (sizeof(dma_addr_t) <= sizeof(struct page *))
212 dma_src = (dma_addr_t *) blocks;
213 180
214 if (dma_src && device && 181 if (unmap &&
215 (src_cnt <= dma_maxpq(device, 0) || 182 (src_cnt <= dma_maxpq(device, 0) ||
216 dma_maxpq(device, DMA_PREP_CONTINUE) > 0) && 183 dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
217 is_dma_pq_aligned(device, offset, 0, len)) { 184 is_dma_pq_aligned(device, offset, 0, len)) {
185 struct dma_async_tx_descriptor *tx;
186 enum dma_ctrl_flags dma_flags = 0;
187 unsigned char coefs[src_cnt];
188 int i, j;
189
218 /* run the p+q asynchronously */ 190 /* run the p+q asynchronously */
219 pr_debug("%s: (async) disks: %d len: %zu\n", 191 pr_debug("%s: (async) disks: %d len: %zu\n",
220 __func__, disks, len); 192 __func__, disks, len);
221 return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset, 193
222 disks, len, dma_src, submit); 194 /* convert source addresses being careful to collapse 'empty'
195 * sources and update the coefficients accordingly
196 */
197 unmap->len = len;
198 for (i = 0, j = 0; i < src_cnt; i++) {
199 if (blocks[i] == NULL)
200 continue;
201 unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset,
202 len, DMA_TO_DEVICE);
203 coefs[j] = raid6_gfexp[i];
204 unmap->to_cnt++;
205 j++;
206 }
207
208 /*
209 * DMAs use destinations as sources,
210 * so use BIDIRECTIONAL mapping
211 */
212 unmap->bidi_cnt++;
213 if (P(blocks, disks))
214 unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
215 offset, len, DMA_BIDIRECTIONAL);
216 else {
217 unmap->addr[j++] = 0;
218 dma_flags |= DMA_PREP_PQ_DISABLE_P;
219 }
220
221 unmap->bidi_cnt++;
222 if (Q(blocks, disks))
223 unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
224 offset, len, DMA_BIDIRECTIONAL);
225 else {
226 unmap->addr[j++] = 0;
227 dma_flags |= DMA_PREP_PQ_DISABLE_Q;
228 }
229
230 tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
231 dmaengine_unmap_put(unmap);
232 return tx;
223 } 233 }
224 234
235 dmaengine_unmap_put(unmap);
236
225 /* run the pq synchronously */ 237 /* run the pq synchronously */
226 pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len); 238 pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);
227 239
@@ -277,50 +289,60 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
277 struct dma_async_tx_descriptor *tx; 289 struct dma_async_tx_descriptor *tx;
278 unsigned char coefs[disks-2]; 290 unsigned char coefs[disks-2];
279 enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; 291 enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
280 dma_addr_t *dma_src = NULL; 292 struct dmaengine_unmap_data *unmap = NULL;
281 int src_cnt = 0;
282 293
283 BUG_ON(disks < 4); 294 BUG_ON(disks < 4);
284 295
285 if (submit->scribble) 296 if (device)
286 dma_src = submit->scribble; 297 unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
287 else if (sizeof(dma_addr_t) <= sizeof(struct page *))
288 dma_src = (dma_addr_t *) blocks;
289 298
290 if (dma_src && device && disks <= dma_maxpq(device, 0) && 299 if (unmap && disks <= dma_maxpq(device, 0) &&
291 is_dma_pq_aligned(device, offset, 0, len)) { 300 is_dma_pq_aligned(device, offset, 0, len)) {
292 struct device *dev = device->dev; 301 struct device *dev = device->dev;
293 dma_addr_t *pq = &dma_src[disks-2]; 302 dma_addr_t pq[2];
294 int i; 303 int i, j = 0, src_cnt = 0;
295 304
296 pr_debug("%s: (async) disks: %d len: %zu\n", 305 pr_debug("%s: (async) disks: %d len: %zu\n",
297 __func__, disks, len); 306 __func__, disks, len);
298 if (!P(blocks, disks)) 307
308 unmap->len = len;
309 for (i = 0; i < disks-2; i++)
310 if (likely(blocks[i])) {
311 unmap->addr[j] = dma_map_page(dev, blocks[i],
312 offset, len,
313 DMA_TO_DEVICE);
314 coefs[j] = raid6_gfexp[i];
315 unmap->to_cnt++;
316 src_cnt++;
317 j++;
318 }
319
320 if (!P(blocks, disks)) {
321 pq[0] = 0;
299 dma_flags |= DMA_PREP_PQ_DISABLE_P; 322 dma_flags |= DMA_PREP_PQ_DISABLE_P;
300 else 323 } else {
301 pq[0] = dma_map_page(dev, P(blocks, disks), 324 pq[0] = dma_map_page(dev, P(blocks, disks),
302 offset, len, 325 offset, len,
303 DMA_TO_DEVICE); 326 DMA_TO_DEVICE);
304 if (!Q(blocks, disks)) 327 unmap->addr[j++] = pq[0];
328 unmap->to_cnt++;
329 }
330 if (!Q(blocks, disks)) {
331 pq[1] = 0;
305 dma_flags |= DMA_PREP_PQ_DISABLE_Q; 332 dma_flags |= DMA_PREP_PQ_DISABLE_Q;
306 else 333 } else {
307 pq[1] = dma_map_page(dev, Q(blocks, disks), 334 pq[1] = dma_map_page(dev, Q(blocks, disks),
308 offset, len, 335 offset, len,
309 DMA_TO_DEVICE); 336 DMA_TO_DEVICE);
337 unmap->addr[j++] = pq[1];
338 unmap->to_cnt++;
339 }
310 340
311 if (submit->flags & ASYNC_TX_FENCE) 341 if (submit->flags & ASYNC_TX_FENCE)
312 dma_flags |= DMA_PREP_FENCE; 342 dma_flags |= DMA_PREP_FENCE;
313 for (i = 0; i < disks-2; i++)
314 if (likely(blocks[i])) {
315 dma_src[src_cnt] = dma_map_page(dev, blocks[i],
316 offset, len,
317 DMA_TO_DEVICE);
318 coefs[src_cnt] = raid6_gfexp[i];
319 src_cnt++;
320 }
321
322 for (;;) { 343 for (;;) {
323 tx = device->device_prep_dma_pq_val(chan, pq, dma_src, 344 tx = device->device_prep_dma_pq_val(chan, pq,
345 unmap->addr,
324 src_cnt, 346 src_cnt,
325 coefs, 347 coefs,
326 len, pqres, 348 len, pqres,
@@ -330,6 +352,8 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
330 async_tx_quiesce(&submit->depend_tx); 352 async_tx_quiesce(&submit->depend_tx);
331 dma_async_issue_pending(chan); 353 dma_async_issue_pending(chan);
332 } 354 }
355
356 dma_set_unmap(tx, unmap);
333 async_tx_submit(chan, tx, submit); 357 async_tx_submit(chan, tx, submit);
334 358
335 return tx; 359 return tx;
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index a9f08a6a582e..934a84981495 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -26,6 +26,7 @@
26#include <linux/dma-mapping.h> 26#include <linux/dma-mapping.h>
27#include <linux/raid/pq.h> 27#include <linux/raid/pq.h>
28#include <linux/async_tx.h> 28#include <linux/async_tx.h>
29#include <linux/dmaengine.h>
29 30
30static struct dma_async_tx_descriptor * 31static struct dma_async_tx_descriptor *
31async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, 32async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
@@ -34,35 +35,45 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
34 struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, 35 struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
35 &dest, 1, srcs, 2, len); 36 &dest, 1, srcs, 2, len);
36 struct dma_device *dma = chan ? chan->device : NULL; 37 struct dma_device *dma = chan ? chan->device : NULL;
38 struct dmaengine_unmap_data *unmap = NULL;
37 const u8 *amul, *bmul; 39 const u8 *amul, *bmul;
38 u8 ax, bx; 40 u8 ax, bx;
39 u8 *a, *b, *c; 41 u8 *a, *b, *c;
40 42
41 if (dma) { 43 if (dma)
42 dma_addr_t dma_dest[2]; 44 unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
43 dma_addr_t dma_src[2]; 45
46 if (unmap) {
44 struct device *dev = dma->dev; 47 struct device *dev = dma->dev;
48 dma_addr_t pq[2];
45 struct dma_async_tx_descriptor *tx; 49 struct dma_async_tx_descriptor *tx;
46 enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; 50 enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
47 51
48 if (submit->flags & ASYNC_TX_FENCE) 52 if (submit->flags & ASYNC_TX_FENCE)
49 dma_flags |= DMA_PREP_FENCE; 53 dma_flags |= DMA_PREP_FENCE;
50 dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); 54 unmap->addr[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE);
51 dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); 55 unmap->addr[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE);
52 dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); 56 unmap->to_cnt = 2;
53 tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef, 57
58 unmap->addr[2] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
59 unmap->bidi_cnt = 1;
60 /* engine only looks at Q, but expects it to follow P */
61 pq[1] = unmap->addr[2];
62
63 unmap->len = len;
64 tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef,
54 len, dma_flags); 65 len, dma_flags);
55 if (tx) { 66 if (tx) {
67 dma_set_unmap(tx, unmap);
56 async_tx_submit(chan, tx, submit); 68 async_tx_submit(chan, tx, submit);
69 dmaengine_unmap_put(unmap);
57 return tx; 70 return tx;
58 } 71 }
59 72
60 /* could not get a descriptor, unmap and fall through to 73 /* could not get a descriptor, unmap and fall through to
61 * the synchronous path 74 * the synchronous path
62 */ 75 */
63 dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL); 76 dmaengine_unmap_put(unmap);
64 dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
65 dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE);
66 } 77 }
67 78
68 /* run the operation synchronously */ 79 /* run the operation synchronously */
@@ -89,23 +100,38 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
89 struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, 100 struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
90 &dest, 1, &src, 1, len); 101 &dest, 1, &src, 1, len);
91 struct dma_device *dma = chan ? chan->device : NULL; 102 struct dma_device *dma = chan ? chan->device : NULL;
103 struct dmaengine_unmap_data *unmap = NULL;
92 const u8 *qmul; /* Q multiplier table */ 104 const u8 *qmul; /* Q multiplier table */
93 u8 *d, *s; 105 u8 *d, *s;
94 106
95 if (dma) { 107 if (dma)
108 unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
109
110 if (unmap) {
96 dma_addr_t dma_dest[2]; 111 dma_addr_t dma_dest[2];
97 dma_addr_t dma_src[1];
98 struct device *dev = dma->dev; 112 struct device *dev = dma->dev;
99 struct dma_async_tx_descriptor *tx; 113 struct dma_async_tx_descriptor *tx;
100 enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; 114 enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
101 115
102 if (submit->flags & ASYNC_TX_FENCE) 116 if (submit->flags & ASYNC_TX_FENCE)
103 dma_flags |= DMA_PREP_FENCE; 117 dma_flags |= DMA_PREP_FENCE;
104 dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); 118 unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
105 dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); 119 unmap->to_cnt++;
106 tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef, 120 unmap->addr[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
107 len, dma_flags); 121 dma_dest[1] = unmap->addr[1];
122 unmap->bidi_cnt++;
123 unmap->len = len;
124
125 /* this looks funny, but the engine looks for Q at
126 * dma_dest[1] and ignores dma_dest[0] as a dest
127 * due to DMA_PREP_PQ_DISABLE_P
128 */
129 tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr,
130 1, &coef, len, dma_flags);
131
108 if (tx) { 132 if (tx) {
133 dma_set_unmap(tx, unmap);
134 dmaengine_unmap_put(unmap);
109 async_tx_submit(chan, tx, submit); 135 async_tx_submit(chan, tx, submit);
110 return tx; 136 return tx;
111 } 137 }
@@ -113,8 +139,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
113 /* could not get a descriptor, unmap and fall through to 139 /* could not get a descriptor, unmap and fall through to
114 * the synchronous path 140 * the synchronous path
115 */ 141 */
116 dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL); 142 dmaengine_unmap_put(unmap);
117 dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
118 } 143 }
119 144
120 /* no channel available, or failed to allocate a descriptor, so 145 /* no channel available, or failed to allocate a descriptor, so
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 7be34248b450..39ea4791a3c9 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -128,7 +128,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
128 } 128 }
129 device->device_issue_pending(chan); 129 device->device_issue_pending(chan);
130 } else { 130 } else {
131 if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS) 131 if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
132 panic("%s: DMA error waiting for depend_tx\n", 132 panic("%s: DMA error waiting for depend_tx\n",
133 __func__); 133 __func__);
134 tx->tx_submit(tx); 134 tx->tx_submit(tx);
@@ -280,7 +280,7 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
280 * we are referring to the correct operation 280 * we are referring to the correct operation
281 */ 281 */
282 BUG_ON(async_tx_test_ack(*tx)); 282 BUG_ON(async_tx_test_ack(*tx));
283 if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS) 283 if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE)
284 panic("%s: DMA error waiting for transaction\n", 284 panic("%s: DMA error waiting for transaction\n",
285 __func__); 285 __func__);
286 async_tx_ack(*tx); 286 async_tx_ack(*tx);
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 8ade0a0481c6..3c562f5a60bb 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -33,48 +33,31 @@
33 33
34/* do_async_xor - dma map the pages and perform the xor with an engine */ 34/* do_async_xor - dma map the pages and perform the xor with an engine */
35static __async_inline struct dma_async_tx_descriptor * 35static __async_inline struct dma_async_tx_descriptor *
36do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, 36do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
37 unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src,
38 struct async_submit_ctl *submit) 37 struct async_submit_ctl *submit)
39{ 38{
40 struct dma_device *dma = chan->device; 39 struct dma_device *dma = chan->device;
41 struct dma_async_tx_descriptor *tx = NULL; 40 struct dma_async_tx_descriptor *tx = NULL;
42 int src_off = 0;
43 int i;
44 dma_async_tx_callback cb_fn_orig = submit->cb_fn; 41 dma_async_tx_callback cb_fn_orig = submit->cb_fn;
45 void *cb_param_orig = submit->cb_param; 42 void *cb_param_orig = submit->cb_param;
46 enum async_tx_flags flags_orig = submit->flags; 43 enum async_tx_flags flags_orig = submit->flags;
47 enum dma_ctrl_flags dma_flags; 44 enum dma_ctrl_flags dma_flags = 0;
48 int xor_src_cnt = 0; 45 int src_cnt = unmap->to_cnt;
49 dma_addr_t dma_dest; 46 int xor_src_cnt;
50 47 dma_addr_t dma_dest = unmap->addr[unmap->to_cnt];
51 /* map the dest bidrectional in case it is re-used as a source */ 48 dma_addr_t *src_list = unmap->addr;
52 dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
53 for (i = 0; i < src_cnt; i++) {
54 /* only map the dest once */
55 if (!src_list[i])
56 continue;
57 if (unlikely(src_list[i] == dest)) {
58 dma_src[xor_src_cnt++] = dma_dest;
59 continue;
60 }
61 dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
62 len, DMA_TO_DEVICE);
63 }
64 src_cnt = xor_src_cnt;
65 49
66 while (src_cnt) { 50 while (src_cnt) {
51 dma_addr_t tmp;
52
67 submit->flags = flags_orig; 53 submit->flags = flags_orig;
68 dma_flags = 0;
69 xor_src_cnt = min(src_cnt, (int)dma->max_xor); 54 xor_src_cnt = min(src_cnt, (int)dma->max_xor);
70 /* if we are submitting additional xors, leave the chain open, 55 /* if we are submitting additional xors, leave the chain open
71 * clear the callback parameters, and leave the destination 56 * and clear the callback parameters
72 * buffer mapped
73 */ 57 */
74 if (src_cnt > xor_src_cnt) { 58 if (src_cnt > xor_src_cnt) {
75 submit->flags &= ~ASYNC_TX_ACK; 59 submit->flags &= ~ASYNC_TX_ACK;
76 submit->flags |= ASYNC_TX_FENCE; 60 submit->flags |= ASYNC_TX_FENCE;
77 dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
78 submit->cb_fn = NULL; 61 submit->cb_fn = NULL;
79 submit->cb_param = NULL; 62 submit->cb_param = NULL;
80 } else { 63 } else {
@@ -85,12 +68,18 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
85 dma_flags |= DMA_PREP_INTERRUPT; 68 dma_flags |= DMA_PREP_INTERRUPT;
86 if (submit->flags & ASYNC_TX_FENCE) 69 if (submit->flags & ASYNC_TX_FENCE)
87 dma_flags |= DMA_PREP_FENCE; 70 dma_flags |= DMA_PREP_FENCE;
88 /* Since we have clobbered the src_list we are committed 71
89 * to doing this asynchronously. Drivers force forward progress 72 /* Drivers force forward progress in case they can not provide a
90 * in case they can not provide a descriptor 73 * descriptor
91 */ 74 */
92 tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off], 75 tmp = src_list[0];
93 xor_src_cnt, len, dma_flags); 76 if (src_list > unmap->addr)
77 src_list[0] = dma_dest;
78 tx = dma->device_prep_dma_xor(chan, dma_dest, src_list,
79 xor_src_cnt, unmap->len,
80 dma_flags);
81 src_list[0] = tmp;
82
94 83
95 if (unlikely(!tx)) 84 if (unlikely(!tx))
96 async_tx_quiesce(&submit->depend_tx); 85 async_tx_quiesce(&submit->depend_tx);
@@ -99,22 +88,21 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
99 while (unlikely(!tx)) { 88 while (unlikely(!tx)) {
100 dma_async_issue_pending(chan); 89 dma_async_issue_pending(chan);
101 tx = dma->device_prep_dma_xor(chan, dma_dest, 90 tx = dma->device_prep_dma_xor(chan, dma_dest,
102 &dma_src[src_off], 91 src_list,
103 xor_src_cnt, len, 92 xor_src_cnt, unmap->len,
104 dma_flags); 93 dma_flags);
105 } 94 }
106 95
96 dma_set_unmap(tx, unmap);
107 async_tx_submit(chan, tx, submit); 97 async_tx_submit(chan, tx, submit);
108 submit->depend_tx = tx; 98 submit->depend_tx = tx;
109 99
110 if (src_cnt > xor_src_cnt) { 100 if (src_cnt > xor_src_cnt) {
111 /* drop completed sources */ 101 /* drop completed sources */
112 src_cnt -= xor_src_cnt; 102 src_cnt -= xor_src_cnt;
113 src_off += xor_src_cnt;
114
115 /* use the intermediate result a source */ 103 /* use the intermediate result a source */
116 dma_src[--src_off] = dma_dest;
117 src_cnt++; 104 src_cnt++;
105 src_list += xor_src_cnt - 1;
118 } else 106 } else
119 break; 107 break;
120 } 108 }
@@ -189,22 +177,40 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
189 struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR, 177 struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
190 &dest, 1, src_list, 178 &dest, 1, src_list,
191 src_cnt, len); 179 src_cnt, len);
192 dma_addr_t *dma_src = NULL; 180 struct dma_device *device = chan ? chan->device : NULL;
181 struct dmaengine_unmap_data *unmap = NULL;
193 182
194 BUG_ON(src_cnt <= 1); 183 BUG_ON(src_cnt <= 1);
195 184
196 if (submit->scribble) 185 if (device)
197 dma_src = submit->scribble; 186 unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
198 else if (sizeof(dma_addr_t) <= sizeof(struct page *)) 187
199 dma_src = (dma_addr_t *) src_list; 188 if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
189 struct dma_async_tx_descriptor *tx;
190 int i, j;
200 191
201 if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) {
202 /* run the xor asynchronously */ 192 /* run the xor asynchronously */
203 pr_debug("%s (async): len: %zu\n", __func__, len); 193 pr_debug("%s (async): len: %zu\n", __func__, len);
204 194
205 return do_async_xor(chan, dest, src_list, offset, src_cnt, len, 195 unmap->len = len;
206 dma_src, submit); 196 for (i = 0, j = 0; i < src_cnt; i++) {
197 if (!src_list[i])
198 continue;
199 unmap->to_cnt++;
200 unmap->addr[j++] = dma_map_page(device->dev, src_list[i],
201 offset, len, DMA_TO_DEVICE);
202 }
203
204 /* map it bidirectional as it may be re-used as a source */
205 unmap->addr[j] = dma_map_page(device->dev, dest, offset, len,
206 DMA_BIDIRECTIONAL);
207 unmap->bidi_cnt = 1;
208
209 tx = do_async_xor(chan, unmap, submit);
210 dmaengine_unmap_put(unmap);
211 return tx;
207 } else { 212 } else {
213 dmaengine_unmap_put(unmap);
208 /* run the xor synchronously */ 214 /* run the xor synchronously */
209 pr_debug("%s (sync): len: %zu\n", __func__, len); 215 pr_debug("%s (sync): len: %zu\n", __func__, len);
210 WARN_ONCE(chan, "%s: no space for dma address conversion\n", 216 WARN_ONCE(chan, "%s: no space for dma address conversion\n",
@@ -268,16 +274,14 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
268 struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len); 274 struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
269 struct dma_device *device = chan ? chan->device : NULL; 275 struct dma_device *device = chan ? chan->device : NULL;
270 struct dma_async_tx_descriptor *tx = NULL; 276 struct dma_async_tx_descriptor *tx = NULL;
271 dma_addr_t *dma_src = NULL; 277 struct dmaengine_unmap_data *unmap = NULL;
272 278
273 BUG_ON(src_cnt <= 1); 279 BUG_ON(src_cnt <= 1);
274 280
275 if (submit->scribble) 281 if (device)
276 dma_src = submit->scribble; 282 unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
277 else if (sizeof(dma_addr_t) <= sizeof(struct page *))
278 dma_src = (dma_addr_t *) src_list;
279 283
280 if (dma_src && device && src_cnt <= device->max_xor && 284 if (unmap && src_cnt <= device->max_xor &&
281 is_dma_xor_aligned(device, offset, 0, len)) { 285 is_dma_xor_aligned(device, offset, 0, len)) {
282 unsigned long dma_prep_flags = 0; 286 unsigned long dma_prep_flags = 0;
283 int i; 287 int i;
@@ -288,11 +292,15 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
288 dma_prep_flags |= DMA_PREP_INTERRUPT; 292 dma_prep_flags |= DMA_PREP_INTERRUPT;
289 if (submit->flags & ASYNC_TX_FENCE) 293 if (submit->flags & ASYNC_TX_FENCE)
290 dma_prep_flags |= DMA_PREP_FENCE; 294 dma_prep_flags |= DMA_PREP_FENCE;
291 for (i = 0; i < src_cnt; i++)
292 dma_src[i] = dma_map_page(device->dev, src_list[i],
293 offset, len, DMA_TO_DEVICE);
294 295
295 tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt, 296 for (i = 0; i < src_cnt; i++) {
297 unmap->addr[i] = dma_map_page(device->dev, src_list[i],
298 offset, len, DMA_TO_DEVICE);
299 unmap->to_cnt++;
300 }
301 unmap->len = len;
302
303 tx = device->device_prep_dma_xor_val(chan, unmap->addr, src_cnt,
296 len, result, 304 len, result,
297 dma_prep_flags); 305 dma_prep_flags);
298 if (unlikely(!tx)) { 306 if (unlikely(!tx)) {
@@ -301,11 +309,11 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
301 while (!tx) { 309 while (!tx) {
302 dma_async_issue_pending(chan); 310 dma_async_issue_pending(chan);
303 tx = device->device_prep_dma_xor_val(chan, 311 tx = device->device_prep_dma_xor_val(chan,
304 dma_src, src_cnt, len, result, 312 unmap->addr, src_cnt, len, result,
305 dma_prep_flags); 313 dma_prep_flags);
306 } 314 }
307 } 315 }
308 316 dma_set_unmap(tx, unmap);
309 async_tx_submit(chan, tx, submit); 317 async_tx_submit(chan, tx, submit);
310 } else { 318 } else {
311 enum async_tx_flags flags_orig = submit->flags; 319 enum async_tx_flags flags_orig = submit->flags;
@@ -327,6 +335,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
327 async_tx_sync_epilog(submit); 335 async_tx_sync_epilog(submit);
328 submit->flags = flags_orig; 336 submit->flags = flags_orig;
329 } 337 }
338 dmaengine_unmap_put(unmap);
330 339
331 return tx; 340 return tx;
332} 341}
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
index 4a92bac744dc..dad95f45b88f 100644
--- a/crypto/async_tx/raid6test.c
+++ b/crypto/async_tx/raid6test.c
@@ -28,7 +28,7 @@
28#undef pr 28#undef pr
29#define pr(fmt, args...) pr_info("raid6test: " fmt, ##args) 29#define pr(fmt, args...) pr_info("raid6test: " fmt, ##args)
30 30
31#define NDISKS 16 /* Including P and Q */ 31#define NDISKS 64 /* Including P and Q */
32 32
33static struct page *dataptrs[NDISKS]; 33static struct page *dataptrs[NDISKS];
34static addr_conv_t addr_conv[NDISKS]; 34static addr_conv_t addr_conv[NDISKS];
@@ -219,6 +219,14 @@ static int raid6_test(void)
219 err += test(11, &tests); 219 err += test(11, &tests);
220 err += test(12, &tests); 220 err += test(12, &tests);
221 } 221 }
222
223 /* the 24 disk case is special for ioatdma as it is the boudary point
224 * at which it needs to switch from 8-source ops to 16-source
225 * ops for continuation (assumes DMA_HAS_PQ_CONTINUE is not set)
226 */
227 if (NDISKS > 24)
228 err += test(24, &tests);
229
222 err += test(NDISKS, &tests); 230 err += test(NDISKS, &tests);
223 231
224 pr("\n"); 232 pr("\n");
diff --git a/crypto/authenc.c b/crypto/authenc.c
index ffce19de05cf..e1223559d5df 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -52,40 +52,52 @@ static void authenc_request_complete(struct aead_request *req, int err)
52 aead_request_complete(req, err); 52 aead_request_complete(req, err);
53} 53}
54 54
55static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, 55int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
56 unsigned int keylen) 56 unsigned int keylen)
57{ 57{
58 unsigned int authkeylen; 58 struct rtattr *rta = (struct rtattr *)key;
59 unsigned int enckeylen;
60 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
61 struct crypto_ahash *auth = ctx->auth;
62 struct crypto_ablkcipher *enc = ctx->enc;
63 struct rtattr *rta = (void *)key;
64 struct crypto_authenc_key_param *param; 59 struct crypto_authenc_key_param *param;
65 int err = -EINVAL;
66 60
67 if (!RTA_OK(rta, keylen)) 61 if (!RTA_OK(rta, keylen))
68 goto badkey; 62 return -EINVAL;
69 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 63 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
70 goto badkey; 64 return -EINVAL;
71 if (RTA_PAYLOAD(rta) < sizeof(*param)) 65 if (RTA_PAYLOAD(rta) < sizeof(*param))
72 goto badkey; 66 return -EINVAL;
73 67
74 param = RTA_DATA(rta); 68 param = RTA_DATA(rta);
75 enckeylen = be32_to_cpu(param->enckeylen); 69 keys->enckeylen = be32_to_cpu(param->enckeylen);
76 70
77 key += RTA_ALIGN(rta->rta_len); 71 key += RTA_ALIGN(rta->rta_len);
78 keylen -= RTA_ALIGN(rta->rta_len); 72 keylen -= RTA_ALIGN(rta->rta_len);
79 73
80 if (keylen < enckeylen) 74 if (keylen < keys->enckeylen)
81 goto badkey; 75 return -EINVAL;
82 76
83 authkeylen = keylen - enckeylen; 77 keys->authkeylen = keylen - keys->enckeylen;
78 keys->authkey = key;
79 keys->enckey = key + keys->authkeylen;
80
81 return 0;
82}
83EXPORT_SYMBOL_GPL(crypto_authenc_extractkeys);
84
85static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
86 unsigned int keylen)
87{
88 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
89 struct crypto_ahash *auth = ctx->auth;
90 struct crypto_ablkcipher *enc = ctx->enc;
91 struct crypto_authenc_keys keys;
92 int err = -EINVAL;
93
94 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
95 goto badkey;
84 96
85 crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); 97 crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
86 crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) & 98 crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) &
87 CRYPTO_TFM_REQ_MASK); 99 CRYPTO_TFM_REQ_MASK);
88 err = crypto_ahash_setkey(auth, key, authkeylen); 100 err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
89 crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) & 101 crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) &
90 CRYPTO_TFM_RES_MASK); 102 CRYPTO_TFM_RES_MASK);
91 103
@@ -95,7 +107,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
95 crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); 107 crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
96 crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) & 108 crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) &
97 CRYPTO_TFM_REQ_MASK); 109 CRYPTO_TFM_REQ_MASK);
98 err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen); 110 err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
99 crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) & 111 crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) &
100 CRYPTO_TFM_RES_MASK); 112 CRYPTO_TFM_RES_MASK);
101 113
@@ -188,7 +200,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
188 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 200 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
189 authsize, 0); 201 authsize, 0);
190 202
191 err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; 203 err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
192 if (err) 204 if (err)
193 goto out; 205 goto out;
194 206
@@ -227,7 +239,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
227 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 239 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
228 authsize, 0); 240 authsize, 0);
229 241
230 err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; 242 err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
231 if (err) 243 if (err)
232 goto out; 244 goto out;
233 245
@@ -368,9 +380,10 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
368 if (!err) { 380 if (!err) {
369 struct crypto_aead *authenc = crypto_aead_reqtfm(areq); 381 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
370 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); 382 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
371 struct ablkcipher_request *abreq = aead_request_ctx(areq); 383 struct authenc_request_ctx *areq_ctx = aead_request_ctx(areq);
372 u8 *iv = (u8 *)(abreq + 1) + 384 struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
373 crypto_ablkcipher_reqsize(ctx->enc); 385 + ctx->reqoff);
386 u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(ctx->enc);
374 387
375 err = crypto_authenc_genicv(areq, iv, 0); 388 err = crypto_authenc_genicv(areq, iv, 0);
376 } 389 }
@@ -462,7 +475,7 @@ static int crypto_authenc_verify(struct aead_request *req,
462 ihash = ohash + authsize; 475 ihash = ohash + authsize;
463 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 476 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
464 authsize, 0); 477 authsize, 0);
465 return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0; 478 return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
466} 479}
467 480
468static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, 481static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index ab53762fc309..4be0dd4373a9 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -59,37 +59,19 @@ static void authenc_esn_request_complete(struct aead_request *req, int err)
59static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key, 59static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key,
60 unsigned int keylen) 60 unsigned int keylen)
61{ 61{
62 unsigned int authkeylen;
63 unsigned int enckeylen;
64 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); 62 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
65 struct crypto_ahash *auth = ctx->auth; 63 struct crypto_ahash *auth = ctx->auth;
66 struct crypto_ablkcipher *enc = ctx->enc; 64 struct crypto_ablkcipher *enc = ctx->enc;
67 struct rtattr *rta = (void *)key; 65 struct crypto_authenc_keys keys;
68 struct crypto_authenc_key_param *param;
69 int err = -EINVAL; 66 int err = -EINVAL;
70 67
71 if (!RTA_OK(rta, keylen)) 68 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
72 goto badkey; 69 goto badkey;
73 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
74 goto badkey;
75 if (RTA_PAYLOAD(rta) < sizeof(*param))
76 goto badkey;
77
78 param = RTA_DATA(rta);
79 enckeylen = be32_to_cpu(param->enckeylen);
80
81 key += RTA_ALIGN(rta->rta_len);
82 keylen -= RTA_ALIGN(rta->rta_len);
83
84 if (keylen < enckeylen)
85 goto badkey;
86
87 authkeylen = keylen - enckeylen;
88 70
89 crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); 71 crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
90 crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) & 72 crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) &
91 CRYPTO_TFM_REQ_MASK); 73 CRYPTO_TFM_REQ_MASK);
92 err = crypto_ahash_setkey(auth, key, authkeylen); 74 err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
93 crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) & 75 crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) &
94 CRYPTO_TFM_RES_MASK); 76 CRYPTO_TFM_RES_MASK);
95 77
@@ -99,7 +81,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
99 crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); 81 crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
100 crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) & 82 crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
101 CRYPTO_TFM_REQ_MASK); 83 CRYPTO_TFM_REQ_MASK);
102 err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen); 84 err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
103 crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) & 85 crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) &
104 CRYPTO_TFM_RES_MASK); 86 CRYPTO_TFM_RES_MASK);
105 87
@@ -247,7 +229,7 @@ static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *ar
247 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 229 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
248 authsize, 0); 230 authsize, 0);
249 231
250 err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; 232 err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
251 if (err) 233 if (err)
252 goto out; 234 goto out;
253 235
@@ -296,7 +278,7 @@ static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *a
296 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 278 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
297 authsize, 0); 279 authsize, 0);
298 280
299 err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; 281 err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
300 if (err) 282 if (err)
301 goto out; 283 goto out;
302 284
@@ -336,7 +318,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
336 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 318 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
337 authsize, 0); 319 authsize, 0);
338 320
339 err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; 321 err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
340 if (err) 322 if (err)
341 goto out; 323 goto out;
342 324
@@ -568,7 +550,7 @@ static int crypto_authenc_esn_verify(struct aead_request *req)
568 ihash = ohash + authsize; 550 ihash = ohash + authsize;
569 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 551 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
570 authsize, 0); 552 authsize, 0);
571 return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0; 553 return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
572} 554}
573 555
574static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv, 556static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 499c91717d93..1df84217f7c9 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -271,7 +271,8 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
271 } 271 }
272 272
273 /* compute plaintext into mac */ 273 /* compute plaintext into mac */
274 get_data_to_compute(cipher, pctx, plain, cryptlen); 274 if (cryptlen)
275 get_data_to_compute(cipher, pctx, plain, cryptlen);
275 276
276out: 277out:
277 return err; 278 return err;
@@ -363,7 +364,7 @@ static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
363 364
364 if (!err) { 365 if (!err) {
365 err = crypto_ccm_auth(req, req->dst, cryptlen); 366 err = crypto_ccm_auth(req, req->dst, cryptlen);
366 if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize)) 367 if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize))
367 err = -EBADMSG; 368 err = -EBADMSG;
368 } 369 }
369 aead_request_complete(req, err); 370 aead_request_complete(req, err);
@@ -422,7 +423,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
422 return err; 423 return err;
423 424
424 /* verify */ 425 /* verify */
425 if (memcmp(authtag, odata, authsize)) 426 if (crypto_memneq(authtag, odata, authsize))
426 return -EBADMSG; 427 return -EBADMSG;
427 428
428 return err; 429 return err;
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 43e1fb05ea54..b4f017939004 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -582,7 +582,7 @@ static int crypto_gcm_verify(struct aead_request *req,
582 582
583 crypto_xor(auth_tag, iauth_tag, 16); 583 crypto_xor(auth_tag, iauth_tag, 16);
584 scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); 584 scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
585 return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; 585 return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
586} 586}
587 587
588static void gcm_decrypt_done(struct crypto_async_request *areq, int err) 588static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
diff --git a/crypto/hash_info.c b/crypto/hash_info.c
new file mode 100644
index 000000000000..3e7ff46f26e8
--- /dev/null
+++ b/crypto/hash_info.c
@@ -0,0 +1,56 @@
1/*
2 * Hash Info: Hash algorithms information
3 *
4 * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#include <linux/export.h>
14#include <crypto/hash_info.h>
15
16const char *const hash_algo_name[HASH_ALGO__LAST] = {
17 [HASH_ALGO_MD4] = "md4",
18 [HASH_ALGO_MD5] = "md5",
19 [HASH_ALGO_SHA1] = "sha1",
20 [HASH_ALGO_RIPE_MD_160] = "rmd160",
21 [HASH_ALGO_SHA256] = "sha256",
22 [HASH_ALGO_SHA384] = "sha384",
23 [HASH_ALGO_SHA512] = "sha512",
24 [HASH_ALGO_SHA224] = "sha224",
25 [HASH_ALGO_RIPE_MD_128] = "rmd128",
26 [HASH_ALGO_RIPE_MD_256] = "rmd256",
27 [HASH_ALGO_RIPE_MD_320] = "rmd320",
28 [HASH_ALGO_WP_256] = "wp256",
29 [HASH_ALGO_WP_384] = "wp384",
30 [HASH_ALGO_WP_512] = "wp512",
31 [HASH_ALGO_TGR_128] = "tgr128",
32 [HASH_ALGO_TGR_160] = "tgr160",
33 [HASH_ALGO_TGR_192] = "tgr192",
34};
35EXPORT_SYMBOL_GPL(hash_algo_name);
36
37const int hash_digest_size[HASH_ALGO__LAST] = {
38 [HASH_ALGO_MD4] = MD5_DIGEST_SIZE,
39 [HASH_ALGO_MD5] = MD5_DIGEST_SIZE,
40 [HASH_ALGO_SHA1] = SHA1_DIGEST_SIZE,
41 [HASH_ALGO_RIPE_MD_160] = RMD160_DIGEST_SIZE,
42 [HASH_ALGO_SHA256] = SHA256_DIGEST_SIZE,
43 [HASH_ALGO_SHA384] = SHA384_DIGEST_SIZE,
44 [HASH_ALGO_SHA512] = SHA512_DIGEST_SIZE,
45 [HASH_ALGO_SHA224] = SHA224_DIGEST_SIZE,
46 [HASH_ALGO_RIPE_MD_128] = RMD128_DIGEST_SIZE,
47 [HASH_ALGO_RIPE_MD_256] = RMD256_DIGEST_SIZE,
48 [HASH_ALGO_RIPE_MD_320] = RMD320_DIGEST_SIZE,
49 [HASH_ALGO_WP_256] = WP256_DIGEST_SIZE,
50 [HASH_ALGO_WP_384] = WP384_DIGEST_SIZE,
51 [HASH_ALGO_WP_512] = WP512_DIGEST_SIZE,
52 [HASH_ALGO_TGR_128] = TGR128_DIGEST_SIZE,
53 [HASH_ALGO_TGR_160] = TGR160_DIGEST_SIZE,
54 [HASH_ALGO_TGR_192] = TGR192_DIGEST_SIZE,
55};
56EXPORT_SYMBOL_GPL(hash_digest_size);
diff --git a/crypto/memneq.c b/crypto/memneq.c
new file mode 100644
index 000000000000..cd0162221c14
--- /dev/null
+++ b/crypto/memneq.c
@@ -0,0 +1,138 @@
1/*
2 * Constant-time equality testing of memory regions.
3 *
4 * Authors:
5 *
6 * James Yonan <james@openvpn.net>
7 * Daniel Borkmann <dborkman@redhat.com>
8 *
9 * This file is provided under a dual BSD/GPLv2 license. When using or
10 * redistributing this file, you may do so under either license.
11 *
12 * GPL LICENSE SUMMARY
13 *
14 * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of version 2 of the GNU General Public License as
18 * published by the Free Software Foundation.
19 *
20 * This program is distributed in the hope that it will be useful, but
21 * WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 * General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
28 * The full GNU General Public License is included in this distribution
29 * in the file called LICENSE.GPL.
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 *
39 * * Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * * Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in
43 * the documentation and/or other materials provided with the
44 * distribution.
45 * * Neither the name of OpenVPN Technologies nor the names of its
46 * contributors may be used to endorse or promote products derived
47 * from this software without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
50 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
51 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
52 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
53 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
54 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
55 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
56 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
57 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
58 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
59 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60 */
61
62#include <crypto/algapi.h>
63
64#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ
65
66/* Generic path for arbitrary size */
67static inline unsigned long
68__crypto_memneq_generic(const void *a, const void *b, size_t size)
69{
70 unsigned long neq = 0;
71
72#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
73 while (size >= sizeof(unsigned long)) {
74 neq |= *(unsigned long *)a ^ *(unsigned long *)b;
75 a += sizeof(unsigned long);
76 b += sizeof(unsigned long);
77 size -= sizeof(unsigned long);
78 }
79#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
80 while (size > 0) {
81 neq |= *(unsigned char *)a ^ *(unsigned char *)b;
82 a += 1;
83 b += 1;
84 size -= 1;
85 }
86 return neq;
87}
88
89/* Loop-free fast-path for frequently used 16-byte size */
90static inline unsigned long __crypto_memneq_16(const void *a, const void *b)
91{
92#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
93 if (sizeof(unsigned long) == 8)
94 return ((*(unsigned long *)(a) ^ *(unsigned long *)(b))
95 | (*(unsigned long *)(a+8) ^ *(unsigned long *)(b+8)));
96 else if (sizeof(unsigned int) == 4)
97 return ((*(unsigned int *)(a) ^ *(unsigned int *)(b))
98 | (*(unsigned int *)(a+4) ^ *(unsigned int *)(b+4))
99 | (*(unsigned int *)(a+8) ^ *(unsigned int *)(b+8))
100 | (*(unsigned int *)(a+12) ^ *(unsigned int *)(b+12)));
101 else
102#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
103 return ((*(unsigned char *)(a) ^ *(unsigned char *)(b))
104 | (*(unsigned char *)(a+1) ^ *(unsigned char *)(b+1))
105 | (*(unsigned char *)(a+2) ^ *(unsigned char *)(b+2))
106 | (*(unsigned char *)(a+3) ^ *(unsigned char *)(b+3))
107 | (*(unsigned char *)(a+4) ^ *(unsigned char *)(b+4))
108 | (*(unsigned char *)(a+5) ^ *(unsigned char *)(b+5))
109 | (*(unsigned char *)(a+6) ^ *(unsigned char *)(b+6))
110 | (*(unsigned char *)(a+7) ^ *(unsigned char *)(b+7))
111 | (*(unsigned char *)(a+8) ^ *(unsigned char *)(b+8))
112 | (*(unsigned char *)(a+9) ^ *(unsigned char *)(b+9))
113 | (*(unsigned char *)(a+10) ^ *(unsigned char *)(b+10))
114 | (*(unsigned char *)(a+11) ^ *(unsigned char *)(b+11))
115 | (*(unsigned char *)(a+12) ^ *(unsigned char *)(b+12))
116 | (*(unsigned char *)(a+13) ^ *(unsigned char *)(b+13))
117 | (*(unsigned char *)(a+14) ^ *(unsigned char *)(b+14))
118 | (*(unsigned char *)(a+15) ^ *(unsigned char *)(b+15)));
119}
120
121/* Compare two areas of memory without leaking timing information,
122 * and with special optimizations for common sizes. Users should
123 * not call this function directly, but should instead use
124 * crypto_memneq defined in crypto/algapi.h.
125 */
126noinline unsigned long __crypto_memneq(const void *a, const void *b,
127 size_t size)
128{
129 switch (size) {
130 case 16:
131 return __crypto_memneq_16(a, b);
132 default:
133 return __crypto_memneq_generic(a, b, size);
134 }
135}
136EXPORT_SYMBOL(__crypto_memneq);
137
138#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 1ab8258fcf56..001f07cdb828 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1242,6 +1242,10 @@ static int do_test(int m)
1242 ret += tcrypt_test("cmac(des3_ede)"); 1242 ret += tcrypt_test("cmac(des3_ede)");
1243 break; 1243 break;
1244 1244
1245 case 155:
1246 ret += tcrypt_test("authenc(hmac(sha1),cbc(aes))");
1247 break;
1248
1245 case 200: 1249 case 200:
1246 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, 1250 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
1247 speed_template_16_24_32); 1251 speed_template_16_24_32);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 432afc03e7c3..77955507f6f1 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -503,16 +503,16 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
503 goto out; 503 goto out;
504 } 504 }
505 505
506 sg_init_one(&sg[0], input,
507 template[i].ilen + (enc ? authsize : 0));
508
509 if (diff_dst) { 506 if (diff_dst) {
510 output = xoutbuf[0]; 507 output = xoutbuf[0];
511 output += align_offset; 508 output += align_offset;
509 sg_init_one(&sg[0], input, template[i].ilen);
512 sg_init_one(&sgout[0], output, 510 sg_init_one(&sgout[0], output,
511 template[i].rlen);
512 } else {
513 sg_init_one(&sg[0], input,
513 template[i].ilen + 514 template[i].ilen +
514 (enc ? authsize : 0)); 515 (enc ? authsize : 0));
515 } else {
516 output = input; 516 output = input;
517 } 517 }
518 518
@@ -612,12 +612,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
612 memcpy(q, template[i].input + temp, 612 memcpy(q, template[i].input + temp,
613 template[i].tap[k]); 613 template[i].tap[k]);
614 614
615 n = template[i].tap[k];
616 if (k == template[i].np - 1 && enc)
617 n += authsize;
618 if (offset_in_page(q) + n < PAGE_SIZE)
619 q[n] = 0;
620
621 sg_set_buf(&sg[k], q, template[i].tap[k]); 615 sg_set_buf(&sg[k], q, template[i].tap[k]);
622 616
623 if (diff_dst) { 617 if (diff_dst) {
@@ -625,13 +619,17 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
625 offset_in_page(IDX[k]); 619 offset_in_page(IDX[k]);
626 620
627 memset(q, 0, template[i].tap[k]); 621 memset(q, 0, template[i].tap[k]);
628 if (offset_in_page(q) + n < PAGE_SIZE)
629 q[n] = 0;
630 622
631 sg_set_buf(&sgout[k], q, 623 sg_set_buf(&sgout[k], q,
632 template[i].tap[k]); 624 template[i].tap[k]);
633 } 625 }
634 626
627 n = template[i].tap[k];
628 if (k == template[i].np - 1 && enc)
629 n += authsize;
630 if (offset_in_page(q) + n < PAGE_SIZE)
631 q[n] = 0;
632
635 temp += template[i].tap[k]; 633 temp += template[i].tap[k];
636 } 634 }
637 635
@@ -650,10 +648,10 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
650 goto out; 648 goto out;
651 } 649 }
652 650
653 sg[k - 1].length += authsize;
654
655 if (diff_dst) 651 if (diff_dst)
656 sgout[k - 1].length += authsize; 652 sgout[k - 1].length += authsize;
653 else
654 sg[k - 1].length += authsize;
657 } 655 }
658 656
659 sg_init_table(asg, template[i].anp); 657 sg_init_table(asg, template[i].anp);
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index c95df0b8c880..5d9248526d78 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -235,17 +235,6 @@ config ACPI_INITRD_TABLE_OVERRIDE
235 initrd, therefore it's safe to say Y. 235 initrd, therefore it's safe to say Y.
236 See Documentation/acpi/initrd_table_override.txt for details 236 See Documentation/acpi/initrd_table_override.txt for details
237 237
238config ACPI_BLACKLIST_YEAR
239 int "Disable ACPI for systems before Jan 1st this year" if X86_32
240 default 0
241 help
242 Enter a 4-digit year, e.g., 2001, to disable ACPI by default
243 on platforms with DMI BIOS date before January 1st that year.
244 "acpi=force" can be used to override this mechanism.
245
246 Enter 0 to disable this mechanism and allow ACPI to
247 run by default no matter what the year. (default)
248
249config ACPI_DEBUG 238config ACPI_DEBUG
250 bool "Debug Statements" 239 bool "Debug Statements"
251 default n 240 default n
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index b9f0d5f4bba5..8711e3797165 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -56,7 +56,6 @@ static int ac_sleep_before_get_state_ms;
56 56
57struct acpi_ac { 57struct acpi_ac {
58 struct power_supply charger; 58 struct power_supply charger;
59 struct acpi_device *adev;
60 struct platform_device *pdev; 59 struct platform_device *pdev;
61 unsigned long long state; 60 unsigned long long state;
62}; 61};
@@ -70,8 +69,9 @@ struct acpi_ac {
70static int acpi_ac_get_state(struct acpi_ac *ac) 69static int acpi_ac_get_state(struct acpi_ac *ac)
71{ 70{
72 acpi_status status; 71 acpi_status status;
72 acpi_handle handle = ACPI_HANDLE(&ac->pdev->dev);
73 73
74 status = acpi_evaluate_integer(ac->adev->handle, "_PSR", NULL, 74 status = acpi_evaluate_integer(handle, "_PSR", NULL,
75 &ac->state); 75 &ac->state);
76 if (ACPI_FAILURE(status)) { 76 if (ACPI_FAILURE(status)) {
77 ACPI_EXCEPTION((AE_INFO, status, 77 ACPI_EXCEPTION((AE_INFO, status,
@@ -119,6 +119,7 @@ static enum power_supply_property ac_props[] = {
119static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data) 119static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
120{ 120{
121 struct acpi_ac *ac = data; 121 struct acpi_ac *ac = data;
122 struct acpi_device *adev;
122 123
123 if (!ac) 124 if (!ac)
124 return; 125 return;
@@ -141,10 +142,11 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
141 msleep(ac_sleep_before_get_state_ms); 142 msleep(ac_sleep_before_get_state_ms);
142 143
143 acpi_ac_get_state(ac); 144 acpi_ac_get_state(ac);
144 acpi_bus_generate_netlink_event(ac->adev->pnp.device_class, 145 adev = ACPI_COMPANION(&ac->pdev->dev);
146 acpi_bus_generate_netlink_event(adev->pnp.device_class,
145 dev_name(&ac->pdev->dev), 147 dev_name(&ac->pdev->dev),
146 event, (u32) ac->state); 148 event, (u32) ac->state);
147 acpi_notifier_call_chain(ac->adev, event, (u32) ac->state); 149 acpi_notifier_call_chain(adev, event, (u32) ac->state);
148 kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); 150 kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
149 } 151 }
150 152
@@ -178,8 +180,8 @@ static int acpi_ac_probe(struct platform_device *pdev)
178 if (!pdev) 180 if (!pdev)
179 return -EINVAL; 181 return -EINVAL;
180 182
181 result = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev); 183 adev = ACPI_COMPANION(&pdev->dev);
182 if (result) 184 if (!adev)
183 return -ENODEV; 185 return -ENODEV;
184 186
185 ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL); 187 ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);
@@ -188,7 +190,6 @@ static int acpi_ac_probe(struct platform_device *pdev)
188 190
189 strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME); 191 strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME);
190 strcpy(acpi_device_class(adev), ACPI_AC_CLASS); 192 strcpy(acpi_device_class(adev), ACPI_AC_CLASS);
191 ac->adev = adev;
192 ac->pdev = pdev; 193 ac->pdev = pdev;
193 platform_set_drvdata(pdev, ac); 194 platform_set_drvdata(pdev, ac);
194 195
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index d3961014aad7..6745fe137b9e 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -163,6 +163,15 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
163 { "80860F41", (unsigned long)&byt_i2c_dev_desc }, 163 { "80860F41", (unsigned long)&byt_i2c_dev_desc },
164 { "INT33B2", }, 164 { "INT33B2", },
165 165
166 { "INT3430", (unsigned long)&lpt_dev_desc },
167 { "INT3431", (unsigned long)&lpt_dev_desc },
168 { "INT3432", (unsigned long)&lpt_dev_desc },
169 { "INT3433", (unsigned long)&lpt_dev_desc },
170 { "INT3434", (unsigned long)&lpt_uart_dev_desc },
171 { "INT3435", (unsigned long)&lpt_uart_dev_desc },
172 { "INT3436", (unsigned long)&lpt_sdio_dev_desc },
173 { "INT3437", },
174
166 { } 175 { }
167}; 176};
168 177
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 8a4cfc7e71f0..dbfe49e5fd63 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -111,7 +111,7 @@ int acpi_create_platform_device(struct acpi_device *adev,
111 pdevinfo.id = -1; 111 pdevinfo.id = -1;
112 pdevinfo.res = resources; 112 pdevinfo.res = resources;
113 pdevinfo.num_res = count; 113 pdevinfo.num_res = count;
114 pdevinfo.acpi_node.handle = adev->handle; 114 pdevinfo.acpi_node.companion = adev;
115 pdev = platform_device_register_full(&pdevinfo); 115 pdev = platform_device_register_full(&pdevinfo);
116 if (IS_ERR(pdev)) { 116 if (IS_ERR(pdev)) {
117 dev_err(&adev->dev, "platform device creation failed: %ld\n", 117 dev_err(&adev->dev, "platform device creation failed: %ld\n",
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index f691d0e4d9fa..ff97430455cb 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -184,7 +184,7 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
184 struct acpi_buffer *output_buffer); 184 struct acpi_buffer *output_buffer);
185 185
186acpi_status 186acpi_status
187acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer, 187acpi_rs_create_aml_resources(struct acpi_buffer *resource_list,
188 struct acpi_buffer *output_buffer); 188 struct acpi_buffer *output_buffer);
189 189
190acpi_status 190acpi_status
@@ -227,8 +227,8 @@ acpi_rs_get_list_length(u8 * aml_buffer,
227 u32 aml_buffer_length, acpi_size * size_needed); 227 u32 aml_buffer_length, acpi_size * size_needed);
228 228
229acpi_status 229acpi_status
230acpi_rs_get_aml_length(struct acpi_resource *linked_list_buffer, 230acpi_rs_get_aml_length(struct acpi_resource *resource_list,
231 acpi_size * size_needed); 231 acpi_size resource_list_size, acpi_size * size_needed);
232 232
233acpi_status 233acpi_status
234acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object, 234acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 243737363fb8..fd1ff54cda19 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -106,6 +106,7 @@ struct acpi_namespace_node *acpi_ns_create_node(u32 name)
106void acpi_ns_delete_node(struct acpi_namespace_node *node) 106void acpi_ns_delete_node(struct acpi_namespace_node *node)
107{ 107{
108 union acpi_operand_object *obj_desc; 108 union acpi_operand_object *obj_desc;
109 union acpi_operand_object *next_desc;
109 110
110 ACPI_FUNCTION_NAME(ns_delete_node); 111 ACPI_FUNCTION_NAME(ns_delete_node);
111 112
@@ -114,12 +115,13 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
114 acpi_ns_detach_object(node); 115 acpi_ns_detach_object(node);
115 116
116 /* 117 /*
117 * Delete an attached data object if present (an object that was created 118 * Delete an attached data object list if present (objects that were
118 * and attached via acpi_attach_data). Note: After any normal object is 119 * attached via acpi_attach_data). Note: After any normal object is
119 * detached above, the only possible remaining object is a data object. 120 * detached above, the only possible remaining object(s) are data
121 * objects, in a linked list.
120 */ 122 */
121 obj_desc = node->object; 123 obj_desc = node->object;
122 if (obj_desc && (obj_desc->common.type == ACPI_TYPE_LOCAL_DATA)) { 124 while (obj_desc && (obj_desc->common.type == ACPI_TYPE_LOCAL_DATA)) {
123 125
124 /* Invoke the attached data deletion handler if present */ 126 /* Invoke the attached data deletion handler if present */
125 127
@@ -127,7 +129,15 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
127 obj_desc->data.handler(node, obj_desc->data.pointer); 129 obj_desc->data.handler(node, obj_desc->data.pointer);
128 } 130 }
129 131
132 next_desc = obj_desc->common.next_object;
130 acpi_ut_remove_reference(obj_desc); 133 acpi_ut_remove_reference(obj_desc);
134 obj_desc = next_desc;
135 }
136
137 /* Special case for the statically allocated root node */
138
139 if (node == acpi_gbl_root_node) {
140 return;
131 } 141 }
132 142
133 /* Now we can delete the node */ 143 /* Now we can delete the node */
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index cc2fea94c5f0..4a0665b6bcc1 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -593,24 +593,26 @@ struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle)
593 593
594void acpi_ns_terminate(void) 594void acpi_ns_terminate(void)
595{ 595{
596 union acpi_operand_object *obj_desc; 596 acpi_status status;
597 597
598 ACPI_FUNCTION_TRACE(ns_terminate); 598 ACPI_FUNCTION_TRACE(ns_terminate);
599 599
600 /* 600 /*
601 * 1) Free the entire namespace -- all nodes and objects 601 * Free the entire namespace -- all nodes and all objects
602 * 602 * attached to the nodes
603 * Delete all object descriptors attached to namepsace nodes
604 */ 603 */
605 acpi_ns_delete_namespace_subtree(acpi_gbl_root_node); 604 acpi_ns_delete_namespace_subtree(acpi_gbl_root_node);
606 605
607 /* Detach any objects attached to the root */ 606 /* Delete any objects attached to the root node */
608 607
609 obj_desc = acpi_ns_get_attached_object(acpi_gbl_root_node); 608 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
610 if (obj_desc) { 609 if (ACPI_FAILURE(status)) {
611 acpi_ns_detach_object(acpi_gbl_root_node); 610 return_VOID;
612 } 611 }
613 612
613 acpi_ns_delete_node(acpi_gbl_root_node);
614 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
615
614 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n")); 616 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n"));
615 return_VOID; 617 return_VOID;
616} 618}
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index b62a0f4f4f9b..b60c9cf82862 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -174,6 +174,7 @@ acpi_rs_stream_option_length(u32 resource_length,
174 * FUNCTION: acpi_rs_get_aml_length 174 * FUNCTION: acpi_rs_get_aml_length
175 * 175 *
176 * PARAMETERS: resource - Pointer to the resource linked list 176 * PARAMETERS: resource - Pointer to the resource linked list
177 * resource_list_size - Size of the resource linked list
177 * size_needed - Where the required size is returned 178 * size_needed - Where the required size is returned
178 * 179 *
179 * RETURN: Status 180 * RETURN: Status
@@ -185,16 +186,20 @@ acpi_rs_stream_option_length(u32 resource_length,
185 ******************************************************************************/ 186 ******************************************************************************/
186 187
187acpi_status 188acpi_status
188acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed) 189acpi_rs_get_aml_length(struct acpi_resource *resource,
190 acpi_size resource_list_size, acpi_size * size_needed)
189{ 191{
190 acpi_size aml_size_needed = 0; 192 acpi_size aml_size_needed = 0;
193 struct acpi_resource *resource_end;
191 acpi_rs_length total_size; 194 acpi_rs_length total_size;
192 195
193 ACPI_FUNCTION_TRACE(rs_get_aml_length); 196 ACPI_FUNCTION_TRACE(rs_get_aml_length);
194 197
195 /* Traverse entire list of internal resource descriptors */ 198 /* Traverse entire list of internal resource descriptors */
196 199
197 while (resource) { 200 resource_end =
201 ACPI_ADD_PTR(struct acpi_resource, resource, resource_list_size);
202 while (resource < resource_end) {
198 203
199 /* Validate the descriptor type */ 204 /* Validate the descriptor type */
200 205
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 65f3e1c5b598..3a2ace93e62c 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -418,22 +418,21 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
418 * 418 *
419 * FUNCTION: acpi_rs_create_aml_resources 419 * FUNCTION: acpi_rs_create_aml_resources
420 * 420 *
421 * PARAMETERS: linked_list_buffer - Pointer to the resource linked list 421 * PARAMETERS: resource_list - Pointer to the resource list buffer
422 * output_buffer - Pointer to the user's buffer 422 * output_buffer - Where the AML buffer is returned
423 * 423 *
424 * RETURN: Status AE_OK if okay, else a valid acpi_status code. 424 * RETURN: Status AE_OK if okay, else a valid acpi_status code.
425 * If the output_buffer is too small, the error will be 425 * If the output_buffer is too small, the error will be
426 * AE_BUFFER_OVERFLOW and output_buffer->Length will point 426 * AE_BUFFER_OVERFLOW and output_buffer->Length will point
427 * to the size buffer needed. 427 * to the size buffer needed.
428 * 428 *
429 * DESCRIPTION: Takes the linked list of device resources and 429 * DESCRIPTION: Converts a list of device resources to an AML bytestream
430 * creates a bytestream to be used as input for the 430 * to be used as input for the _SRS control method.
431 * _SRS control method.
432 * 431 *
433 ******************************************************************************/ 432 ******************************************************************************/
434 433
435acpi_status 434acpi_status
436acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer, 435acpi_rs_create_aml_resources(struct acpi_buffer *resource_list,
437 struct acpi_buffer *output_buffer) 436 struct acpi_buffer *output_buffer)
438{ 437{
439 acpi_status status; 438 acpi_status status;
@@ -441,16 +440,16 @@ acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
441 440
442 ACPI_FUNCTION_TRACE(rs_create_aml_resources); 441 ACPI_FUNCTION_TRACE(rs_create_aml_resources);
443 442
444 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "LinkedListBuffer = %p\n", 443 /* Params already validated, no need to re-validate here */
445 linked_list_buffer));
446 444
447 /* 445 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ResourceList Buffer = %p\n",
448 * Params already validated, so we don't re-validate here 446 resource_list->pointer));
449 * 447
450 * Pass the linked_list_buffer into a module that calculates 448 /* Get the buffer size needed for the AML byte stream */
451 * the buffer size needed for the byte stream. 449
452 */ 450 status = acpi_rs_get_aml_length(resource_list->pointer,
453 status = acpi_rs_get_aml_length(linked_list_buffer, &aml_size_needed); 451 resource_list->length,
452 &aml_size_needed);
454 453
455 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlSizeNeeded=%X, %s\n", 454 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlSizeNeeded=%X, %s\n",
456 (u32)aml_size_needed, acpi_format_exception(status))); 455 (u32)aml_size_needed, acpi_format_exception(status)));
@@ -467,10 +466,9 @@ acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
467 466
468 /* Do the conversion */ 467 /* Do the conversion */
469 468
470 status = 469 status = acpi_rs_convert_resources_to_aml(resource_list->pointer,
471 acpi_rs_convert_resources_to_aml(linked_list_buffer, 470 aml_size_needed,
472 aml_size_needed, 471 output_buffer->pointer);
473 output_buffer->pointer);
474 if (ACPI_FAILURE(status)) { 472 if (ACPI_FAILURE(status)) {
475 return_ACPI_STATUS(status); 473 return_ACPI_STATUS(status);
476 } 474 }
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index aef303d56d86..14a7982c9961 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -753,7 +753,7 @@ acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
753 * Convert the linked list into a byte stream 753 * Convert the linked list into a byte stream
754 */ 754 */
755 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; 755 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
756 status = acpi_rs_create_aml_resources(in_buffer->pointer, &buffer); 756 status = acpi_rs_create_aml_resources(in_buffer, &buffer);
757 if (ACPI_FAILURE(status)) { 757 if (ACPI_FAILURE(status)) {
758 goto cleanup; 758 goto cleanup;
759 } 759 }
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 1a67b3944b3b..03ae8affe48f 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -185,6 +185,7 @@ acpi_debug_print(u32 requested_debug_level,
185 } 185 }
186 186
187 acpi_gbl_prev_thread_id = thread_id; 187 acpi_gbl_prev_thread_id = thread_id;
188 acpi_gbl_nesting_level = 0;
188 } 189 }
189 190
190 /* 191 /*
@@ -193,13 +194,21 @@ acpi_debug_print(u32 requested_debug_level,
193 */ 194 */
194 acpi_os_printf("%9s-%04ld ", module_name, line_number); 195 acpi_os_printf("%9s-%04ld ", module_name, line_number);
195 196
197#ifdef ACPI_EXEC_APP
198 /*
199 * For acpi_exec only, emit the thread ID and nesting level.
200 * Note: nesting level is really only useful during a single-thread
201 * execution. Otherwise, multiple threads will keep resetting the
202 * level.
203 */
196 if (ACPI_LV_THREADS & acpi_dbg_level) { 204 if (ACPI_LV_THREADS & acpi_dbg_level) {
197 acpi_os_printf("[%u] ", (u32)thread_id); 205 acpi_os_printf("[%u] ", (u32)thread_id);
198 } 206 }
199 207
200 acpi_os_printf("[%02ld] %-22.22s: ", 208 acpi_os_printf("[%02ld] ", acpi_gbl_nesting_level);
201 acpi_gbl_nesting_level, 209#endif
202 acpi_ut_trim_function_name(function_name)); 210
211 acpi_os_printf("%-22.22s: ", acpi_ut_trim_function_name(function_name));
203 212
204 va_start(args, format); 213 va_start(args, format);
205 acpi_os_vprintf(format, args); 214 acpi_os_vprintf(format, args);
@@ -420,7 +429,9 @@ acpi_ut_exit(u32 line_number,
420 component_id, "%s\n", acpi_gbl_fn_exit_str); 429 component_id, "%s\n", acpi_gbl_fn_exit_str);
421 } 430 }
422 431
423 acpi_gbl_nesting_level--; 432 if (acpi_gbl_nesting_level) {
433 acpi_gbl_nesting_level--;
434 }
424} 435}
425 436
426ACPI_EXPORT_SYMBOL(acpi_ut_exit) 437ACPI_EXPORT_SYMBOL(acpi_ut_exit)
@@ -467,7 +478,9 @@ acpi_ut_status_exit(u32 line_number,
467 } 478 }
468 } 479 }
469 480
470 acpi_gbl_nesting_level--; 481 if (acpi_gbl_nesting_level) {
482 acpi_gbl_nesting_level--;
483 }
471} 484}
472 485
473ACPI_EXPORT_SYMBOL(acpi_ut_status_exit) 486ACPI_EXPORT_SYMBOL(acpi_ut_status_exit)
@@ -504,7 +517,9 @@ acpi_ut_value_exit(u32 line_number,
504 ACPI_FORMAT_UINT64(value)); 517 ACPI_FORMAT_UINT64(value));
505 } 518 }
506 519
507 acpi_gbl_nesting_level--; 520 if (acpi_gbl_nesting_level) {
521 acpi_gbl_nesting_level--;
522 }
508} 523}
509 524
510ACPI_EXPORT_SYMBOL(acpi_ut_value_exit) 525ACPI_EXPORT_SYMBOL(acpi_ut_value_exit)
@@ -540,7 +555,9 @@ acpi_ut_ptr_exit(u32 line_number,
540 ptr); 555 ptr);
541 } 556 }
542 557
543 acpi_gbl_nesting_level--; 558 if (acpi_gbl_nesting_level) {
559 acpi_gbl_nesting_level--;
560 }
544} 561}
545 562
546#endif 563#endif
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index fb848378d582..078c4f7fe2dd 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -75,39 +75,6 @@ static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
75 {""} 75 {""}
76}; 76};
77 77
78#if CONFIG_ACPI_BLACKLIST_YEAR
79
80static int __init blacklist_by_year(void)
81{
82 int year;
83
84 /* Doesn't exist? Likely an old system */
85 if (!dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL)) {
86 printk(KERN_ERR PREFIX "no DMI BIOS year, "
87 "acpi=force is required to enable ACPI\n" );
88 return 1;
89 }
90 /* 0? Likely a buggy new BIOS */
91 if (year == 0) {
92 printk(KERN_ERR PREFIX "DMI BIOS year==0, "
93 "assuming ACPI-capable machine\n" );
94 return 0;
95 }
96 if (year < CONFIG_ACPI_BLACKLIST_YEAR) {
97 printk(KERN_ERR PREFIX "BIOS age (%d) fails cutoff (%d), "
98 "acpi=force is required to enable ACPI\n",
99 year, CONFIG_ACPI_BLACKLIST_YEAR);
100 return 1;
101 }
102 return 0;
103}
104#else
105static inline int blacklist_by_year(void)
106{
107 return 0;
108}
109#endif
110
111int __init acpi_blacklisted(void) 78int __init acpi_blacklisted(void)
112{ 79{
113 int i = 0; 80 int i = 0;
@@ -166,8 +133,6 @@ int __init acpi_blacklisted(void)
166 } 133 }
167 } 134 }
168 135
169 blacklisted += blacklist_by_year();
170
171 dmi_check_system(acpi_osi_dmi_table); 136 dmi_check_system(acpi_osi_dmi_table);
172 137
173 return blacklisted; 138 return blacklisted;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index d42b2fb5a7e9..b3480cf7db1a 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -22,16 +22,12 @@
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 */ 23 */
24 24
25#include <linux/device.h> 25#include <linux/acpi.h>
26#include <linux/export.h> 26#include <linux/export.h>
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/pm_qos.h> 28#include <linux/pm_qos.h>
29#include <linux/pm_runtime.h> 29#include <linux/pm_runtime.h>
30 30
31#include <acpi/acpi.h>
32#include <acpi/acpi_bus.h>
33#include <acpi/acpi_drivers.h>
34
35#include "internal.h" 31#include "internal.h"
36 32
37#define _COMPONENT ACPI_POWER_COMPONENT 33#define _COMPONENT ACPI_POWER_COMPONENT
@@ -548,7 +544,7 @@ static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev,
548 */ 544 */
549int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in) 545int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
550{ 546{
551 acpi_handle handle = DEVICE_ACPI_HANDLE(dev); 547 acpi_handle handle = ACPI_HANDLE(dev);
552 struct acpi_device *adev; 548 struct acpi_device *adev;
553 int ret, d_min, d_max; 549 int ret, d_min, d_max;
554 550
@@ -656,7 +652,7 @@ int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
656 if (!device_run_wake(phys_dev)) 652 if (!device_run_wake(phys_dev))
657 return -EINVAL; 653 return -EINVAL;
658 654
659 handle = DEVICE_ACPI_HANDLE(phys_dev); 655 handle = ACPI_HANDLE(phys_dev);
660 if (!handle || acpi_bus_get_device(handle, &adev)) { 656 if (!handle || acpi_bus_get_device(handle, &adev)) {
661 dev_dbg(phys_dev, "ACPI handle without context in %s!\n", 657 dev_dbg(phys_dev, "ACPI handle without context in %s!\n",
662 __func__); 658 __func__);
@@ -700,7 +696,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
700 if (!device_can_wakeup(dev)) 696 if (!device_can_wakeup(dev))
701 return -EINVAL; 697 return -EINVAL;
702 698
703 handle = DEVICE_ACPI_HANDLE(dev); 699 handle = ACPI_HANDLE(dev);
704 if (!handle || acpi_bus_get_device(handle, &adev)) { 700 if (!handle || acpi_bus_get_device(handle, &adev)) {
705 dev_dbg(dev, "ACPI handle without context in %s!\n", __func__); 701 dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
706 return -ENODEV; 702 return -ENODEV;
@@ -722,7 +718,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
722 */ 718 */
723struct acpi_device *acpi_dev_pm_get_node(struct device *dev) 719struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
724{ 720{
725 acpi_handle handle = DEVICE_ACPI_HANDLE(dev); 721 acpi_handle handle = ACPI_HANDLE(dev);
726 struct acpi_device *adev; 722 struct acpi_device *adev;
727 723
728 return handle && !acpi_bus_get_device(handle, &adev) ? adev : NULL; 724 return handle && !acpi_bus_get_device(handle, &adev) ? adev : NULL;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d5309fd49458..ba5b56db9d27 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -173,9 +173,10 @@ static void start_transaction(struct acpi_ec *ec)
173static void advance_transaction(struct acpi_ec *ec, u8 status) 173static void advance_transaction(struct acpi_ec *ec, u8 status)
174{ 174{
175 unsigned long flags; 175 unsigned long flags;
176 struct transaction *t = ec->curr; 176 struct transaction *t;
177 177
178 spin_lock_irqsave(&ec->lock, flags); 178 spin_lock_irqsave(&ec->lock, flags);
179 t = ec->curr;
179 if (!t) 180 if (!t)
180 goto unlock; 181 goto unlock;
181 if (t->wlen > t->wi) { 182 if (t->wlen > t->wi) {
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index fdef416c0ff6..cae3b387b867 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -78,15 +78,17 @@ enum {
78#define ACPI_GENL_VERSION 0x01 78#define ACPI_GENL_VERSION 0x01
79#define ACPI_GENL_MCAST_GROUP_NAME "acpi_mc_group" 79#define ACPI_GENL_MCAST_GROUP_NAME "acpi_mc_group"
80 80
81static const struct genl_multicast_group acpi_event_mcgrps[] = {
82 { .name = ACPI_GENL_MCAST_GROUP_NAME, },
83};
84
81static struct genl_family acpi_event_genl_family = { 85static struct genl_family acpi_event_genl_family = {
82 .id = GENL_ID_GENERATE, 86 .id = GENL_ID_GENERATE,
83 .name = ACPI_GENL_FAMILY_NAME, 87 .name = ACPI_GENL_FAMILY_NAME,
84 .version = ACPI_GENL_VERSION, 88 .version = ACPI_GENL_VERSION,
85 .maxattr = ACPI_GENL_ATTR_MAX, 89 .maxattr = ACPI_GENL_ATTR_MAX,
86}; 90 .mcgrps = acpi_event_mcgrps,
87 91 .n_mcgrps = ARRAY_SIZE(acpi_event_mcgrps),
88static struct genl_multicast_group acpi_event_mcgrp = {
89 .name = ACPI_GENL_MCAST_GROUP_NAME,
90}; 92};
91 93
92int acpi_bus_generate_netlink_event(const char *device_class, 94int acpi_bus_generate_netlink_event(const char *device_class,
@@ -141,7 +143,7 @@ int acpi_bus_generate_netlink_event(const char *device_class,
141 return result; 143 return result;
142 } 144 }
143 145
144 genlmsg_multicast(skb, 0, acpi_event_mcgrp.id, GFP_ATOMIC); 146 genlmsg_multicast(&acpi_event_genl_family, skb, 0, 0, GFP_ATOMIC);
145 return 0; 147 return 0;
146} 148}
147 149
@@ -149,18 +151,7 @@ EXPORT_SYMBOL(acpi_bus_generate_netlink_event);
149 151
150static int acpi_event_genetlink_init(void) 152static int acpi_event_genetlink_init(void)
151{ 153{
152 int result; 154 return genl_register_family(&acpi_event_genl_family);
153
154 result = genl_register_family(&acpi_event_genl_family);
155 if (result)
156 return result;
157
158 result = genl_register_mc_group(&acpi_event_genl_family,
159 &acpi_event_mcgrp);
160 if (result)
161 genl_unregister_family(&acpi_event_genl_family);
162
163 return result;
164} 155}
165 156
166#else 157#else
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 10f0f40587bb..a22a295edb69 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -197,30 +197,28 @@ static void acpi_physnode_link_name(char *buf, unsigned int node_id)
197 197
198int acpi_bind_one(struct device *dev, acpi_handle handle) 198int acpi_bind_one(struct device *dev, acpi_handle handle)
199{ 199{
200 struct acpi_device *acpi_dev; 200 struct acpi_device *acpi_dev = NULL;
201 acpi_status status;
202 struct acpi_device_physical_node *physical_node, *pn; 201 struct acpi_device_physical_node *physical_node, *pn;
203 char physical_node_name[PHYSICAL_NODE_NAME_SIZE]; 202 char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
204 struct list_head *physnode_list; 203 struct list_head *physnode_list;
205 unsigned int node_id; 204 unsigned int node_id;
206 int retval = -EINVAL; 205 int retval = -EINVAL;
207 206
208 if (ACPI_HANDLE(dev)) { 207 if (ACPI_COMPANION(dev)) {
209 if (handle) { 208 if (handle) {
210 dev_warn(dev, "ACPI handle is already set\n"); 209 dev_warn(dev, "ACPI companion already set\n");
211 return -EINVAL; 210 return -EINVAL;
212 } else { 211 } else {
213 handle = ACPI_HANDLE(dev); 212 acpi_dev = ACPI_COMPANION(dev);
214 } 213 }
214 } else {
215 acpi_bus_get_device(handle, &acpi_dev);
215 } 216 }
216 if (!handle) 217 if (!acpi_dev)
217 return -EINVAL; 218 return -EINVAL;
218 219
220 get_device(&acpi_dev->dev);
219 get_device(dev); 221 get_device(dev);
220 status = acpi_bus_get_device(handle, &acpi_dev);
221 if (ACPI_FAILURE(status))
222 goto err;
223
224 physical_node = kzalloc(sizeof(*physical_node), GFP_KERNEL); 222 physical_node = kzalloc(sizeof(*physical_node), GFP_KERNEL);
225 if (!physical_node) { 223 if (!physical_node) {
226 retval = -ENOMEM; 224 retval = -ENOMEM;
@@ -242,10 +240,11 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
242 240
243 dev_warn(dev, "Already associated with ACPI node\n"); 241 dev_warn(dev, "Already associated with ACPI node\n");
244 kfree(physical_node); 242 kfree(physical_node);
245 if (ACPI_HANDLE(dev) != handle) 243 if (ACPI_COMPANION(dev) != acpi_dev)
246 goto err; 244 goto err;
247 245
248 put_device(dev); 246 put_device(dev);
247 put_device(&acpi_dev->dev);
249 return 0; 248 return 0;
250 } 249 }
251 if (pn->node_id == node_id) { 250 if (pn->node_id == node_id) {
@@ -259,8 +258,8 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
259 list_add(&physical_node->node, physnode_list); 258 list_add(&physical_node->node, physnode_list);
260 acpi_dev->physical_node_count++; 259 acpi_dev->physical_node_count++;
261 260
262 if (!ACPI_HANDLE(dev)) 261 if (!ACPI_COMPANION(dev))
263 ACPI_HANDLE_SET(dev, acpi_dev->handle); 262 ACPI_COMPANION_SET(dev, acpi_dev);
264 263
265 acpi_physnode_link_name(physical_node_name, node_id); 264 acpi_physnode_link_name(physical_node_name, node_id);
266 retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, 265 retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
@@ -283,27 +282,21 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
283 return 0; 282 return 0;
284 283
285 err: 284 err:
286 ACPI_HANDLE_SET(dev, NULL); 285 ACPI_COMPANION_SET(dev, NULL);
287 put_device(dev); 286 put_device(dev);
287 put_device(&acpi_dev->dev);
288 return retval; 288 return retval;
289} 289}
290EXPORT_SYMBOL_GPL(acpi_bind_one); 290EXPORT_SYMBOL_GPL(acpi_bind_one);
291 291
292int acpi_unbind_one(struct device *dev) 292int acpi_unbind_one(struct device *dev)
293{ 293{
294 struct acpi_device *acpi_dev = ACPI_COMPANION(dev);
294 struct acpi_device_physical_node *entry; 295 struct acpi_device_physical_node *entry;
295 struct acpi_device *acpi_dev;
296 acpi_status status;
297 296
298 if (!ACPI_HANDLE(dev)) 297 if (!acpi_dev)
299 return 0; 298 return 0;
300 299
301 status = acpi_bus_get_device(ACPI_HANDLE(dev), &acpi_dev);
302 if (ACPI_FAILURE(status)) {
303 dev_err(dev, "Oops, ACPI handle corrupt in %s()\n", __func__);
304 return -EINVAL;
305 }
306
307 mutex_lock(&acpi_dev->physical_node_lock); 300 mutex_lock(&acpi_dev->physical_node_lock);
308 301
309 list_for_each_entry(entry, &acpi_dev->physical_node_list, node) 302 list_for_each_entry(entry, &acpi_dev->physical_node_list, node)
@@ -316,9 +309,10 @@ int acpi_unbind_one(struct device *dev)
316 acpi_physnode_link_name(physnode_name, entry->node_id); 309 acpi_physnode_link_name(physnode_name, entry->node_id);
317 sysfs_remove_link(&acpi_dev->dev.kobj, physnode_name); 310 sysfs_remove_link(&acpi_dev->dev.kobj, physnode_name);
318 sysfs_remove_link(&dev->kobj, "firmware_node"); 311 sysfs_remove_link(&dev->kobj, "firmware_node");
319 ACPI_HANDLE_SET(dev, NULL); 312 ACPI_COMPANION_SET(dev, NULL);
320 /* acpi_bind_one() increase refcnt by one. */ 313 /* Drop references taken by acpi_bind_one(). */
321 put_device(dev); 314 put_device(dev);
315 put_device(&acpi_dev->dev);
322 kfree(entry); 316 kfree(entry);
323 break; 317 break;
324 } 318 }
@@ -328,6 +322,15 @@ int acpi_unbind_one(struct device *dev)
328} 322}
329EXPORT_SYMBOL_GPL(acpi_unbind_one); 323EXPORT_SYMBOL_GPL(acpi_unbind_one);
330 324
325void acpi_preset_companion(struct device *dev, acpi_handle parent, u64 addr)
326{
327 struct acpi_device *adev;
328
329 if (!acpi_bus_get_device(acpi_get_child(parent, addr), &adev))
330 ACPI_COMPANION_SET(dev, adev);
331}
332EXPORT_SYMBOL_GPL(acpi_preset_companion);
333
331static int acpi_platform_notify(struct device *dev) 334static int acpi_platform_notify(struct device *dev)
332{ 335{
333 struct acpi_bus_type *type = acpi_get_bus_type(dev); 336 struct acpi_bus_type *type = acpi_get_bus_type(dev);
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
index 266bc58ce0ce..386a9fe497b4 100644
--- a/drivers/acpi/nvs.c
+++ b/drivers/acpi/nvs.c
@@ -13,7 +13,6 @@
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/acpi.h> 14#include <linux/acpi.h>
15#include <linux/acpi_io.h> 15#include <linux/acpi_io.h>
16#include <acpi/acpiosxf.h>
17 16
18/* ACPI NVS regions, APEI may use it */ 17/* ACPI NVS regions, APEI may use it */
19 18
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 56f05869b08d..20360e480bd8 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -65,6 +65,9 @@ static struct acpi_scan_handler pci_root_handler = {
65 .ids = root_device_ids, 65 .ids = root_device_ids,
66 .attach = acpi_pci_root_add, 66 .attach = acpi_pci_root_add,
67 .detach = acpi_pci_root_remove, 67 .detach = acpi_pci_root_remove,
68 .hotplug = {
69 .ignore = true,
70 },
68}; 71};
69 72
70static DEFINE_MUTEX(osc_lock); 73static DEFINE_MUTEX(osc_lock);
@@ -575,6 +578,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
575 dev_err(&device->dev, 578 dev_err(&device->dev,
576 "Bus %04x:%02x not present in PCI namespace\n", 579 "Bus %04x:%02x not present in PCI namespace\n",
577 root->segment, (unsigned int)root->secondary.start); 580 root->segment, (unsigned int)root->secondary.start);
581 device->driver_data = NULL;
578 result = -ENODEV; 582 result = -ENODEV;
579 goto end; 583 goto end;
580 } 584 }
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 55f9dedbbf9f..fd39459926b1 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -289,24 +289,17 @@ void acpi_bus_device_eject(void *data, u32 ost_src)
289{ 289{
290 struct acpi_device *device = data; 290 struct acpi_device *device = data;
291 acpi_handle handle = device->handle; 291 acpi_handle handle = device->handle;
292 struct acpi_scan_handler *handler;
293 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; 292 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
294 int error; 293 int error;
295 294
296 lock_device_hotplug(); 295 lock_device_hotplug();
297 mutex_lock(&acpi_scan_lock); 296 mutex_lock(&acpi_scan_lock);
298 297
299 handler = device->handler;
300 if (!handler || !handler->hotplug.enabled) {
301 put_device(&device->dev);
302 goto err_support;
303 }
304
305 if (ost_src == ACPI_NOTIFY_EJECT_REQUEST) 298 if (ost_src == ACPI_NOTIFY_EJECT_REQUEST)
306 acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST, 299 acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
307 ACPI_OST_SC_EJECT_IN_PROGRESS, NULL); 300 ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
308 301
309 if (handler->hotplug.mode == AHM_CONTAINER) 302 if (device->handler && device->handler->hotplug.mode == AHM_CONTAINER)
310 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE); 303 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
311 304
312 error = acpi_scan_hot_remove(device); 305 error = acpi_scan_hot_remove(device);
@@ -411,8 +404,7 @@ static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
411 break; 404 break;
412 case ACPI_NOTIFY_EJECT_REQUEST: 405 case ACPI_NOTIFY_EJECT_REQUEST:
413 acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n"); 406 acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
414 status = acpi_bus_get_device(handle, &adev); 407 if (acpi_bus_get_device(handle, &adev))
415 if (ACPI_FAILURE(status))
416 goto err_out; 408 goto err_out;
417 409
418 get_device(&adev->dev); 410 get_device(&adev->dev);
@@ -1780,7 +1772,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type)
1780 */ 1772 */
1781 list_for_each_entry(hwid, &pnp.ids, list) { 1773 list_for_each_entry(hwid, &pnp.ids, list) {
1782 handler = acpi_scan_match_handler(hwid->id, NULL); 1774 handler = acpi_scan_match_handler(hwid->id, NULL);
1783 if (handler) { 1775 if (handler && !handler->hotplug.ignore) {
1784 acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, 1776 acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
1785 acpi_hotplug_notify_cb, handler); 1777 acpi_hotplug_notify_cb, handler);
1786 break; 1778 break;
@@ -1997,6 +1989,7 @@ static int acpi_bus_scan_fixed(void)
1997 if (result) 1989 if (result)
1998 return result; 1990 return result;
1999 1991
1992 device->flags.match_driver = true;
2000 result = device_attach(&device->dev); 1993 result = device_attach(&device->dev);
2001 if (result < 0) 1994 if (result < 0)
2002 return result; 1995 return result;
@@ -2013,6 +2006,7 @@ static int acpi_bus_scan_fixed(void)
2013 if (result) 2006 if (result)
2014 return result; 2007 return result;
2015 2008
2009 device->flags.match_driver = true;
2016 result = device_attach(&device->dev); 2010 result = device_attach(&device->dev);
2017 } 2011 }
2018 2012
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 14df30580e15..721e949e606e 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -525,7 +525,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
525 * generate wakeup events. 525 * generate wakeup events.
526 */ 526 */
527 if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) { 527 if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
528 acpi_event_status pwr_btn_status; 528 acpi_event_status pwr_btn_status = ACPI_EVENT_FLAG_DISABLED;
529 529
530 acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status); 530 acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);
531 531
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index db5293650f62..6dbc3ca45223 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -309,7 +309,7 @@ static void acpi_table_attr_init(struct acpi_table_attr *table_attr,
309 sprintf(table_attr->name + ACPI_NAME_SIZE, "%d", 309 sprintf(table_attr->name + ACPI_NAME_SIZE, "%d",
310 table_attr->instance); 310 table_attr->instance);
311 311
312 table_attr->attr.size = 0; 312 table_attr->attr.size = table_header->length;
313 table_attr->attr.read = acpi_table_show; 313 table_attr->attr.read = acpi_table_show;
314 table_attr->attr.attr.name = table_attr->name; 314 table_attr->attr.attr.name = table_attr->name;
315 table_attr->attr.attr.mode = 0400; 315 table_attr->attr.attr.mode = 0400;
@@ -354,8 +354,9 @@ static int acpi_tables_sysfs_init(void)
354{ 354{
355 struct acpi_table_attr *table_attr; 355 struct acpi_table_attr *table_attr;
356 struct acpi_table_header *table_header = NULL; 356 struct acpi_table_header *table_header = NULL;
357 int table_index = 0; 357 int table_index;
358 int result; 358 acpi_status status;
359 int ret;
359 360
360 tables_kobj = kobject_create_and_add("tables", acpi_kobj); 361 tables_kobj = kobject_create_and_add("tables", acpi_kobj);
361 if (!tables_kobj) 362 if (!tables_kobj)
@@ -365,33 +366,34 @@ static int acpi_tables_sysfs_init(void)
365 if (!dynamic_tables_kobj) 366 if (!dynamic_tables_kobj)
366 goto err_dynamic_tables; 367 goto err_dynamic_tables;
367 368
368 do { 369 for (table_index = 0;; table_index++) {
369 result = acpi_get_table_by_index(table_index, &table_header); 370 status = acpi_get_table_by_index(table_index, &table_header);
370 if (!result) { 371
371 table_index++; 372 if (status == AE_BAD_PARAMETER)
372 table_attr = NULL; 373 break;
373 table_attr = 374
374 kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL); 375 if (ACPI_FAILURE(status))
375 if (!table_attr) 376 continue;
376 return -ENOMEM; 377
377 378 table_attr = NULL;
378 acpi_table_attr_init(table_attr, table_header); 379 table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
379 result = 380 if (!table_attr)
380 sysfs_create_bin_file(tables_kobj, 381 return -ENOMEM;
381 &table_attr->attr); 382
382 if (result) { 383 acpi_table_attr_init(table_attr, table_header);
383 kfree(table_attr); 384 ret = sysfs_create_bin_file(tables_kobj, &table_attr->attr);
384 return result; 385 if (ret) {
385 } else 386 kfree(table_attr);
386 list_add_tail(&table_attr->node, 387 return ret;
387 &acpi_table_attr_list);
388 } 388 }
389 } while (!result); 389 list_add_tail(&table_attr->node, &acpi_table_attr_list);
390 }
391
390 kobject_uevent(tables_kobj, KOBJ_ADD); 392 kobject_uevent(tables_kobj, KOBJ_ADD);
391 kobject_uevent(dynamic_tables_kobj, KOBJ_ADD); 393 kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
392 result = acpi_install_table_handler(acpi_sysfs_table_handler, NULL); 394 status = acpi_install_table_handler(acpi_sysfs_table_handler, NULL);
393 395
394 return result == AE_OK ? 0 : -EINVAL; 396 return ACPI_FAILURE(status) ? -EINVAL : 0;
395err_dynamic_tables: 397err_dynamic_tables:
396 kobject_put(tables_kobj); 398 kobject_put(tables_kobj);
397err: 399err:
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 18dbdff4656e..995e91bcb97b 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -82,13 +82,6 @@ static bool allow_duplicates;
82module_param(allow_duplicates, bool, 0644); 82module_param(allow_duplicates, bool, 0644);
83 83
84/* 84/*
85 * Some BIOSes claim they use minimum backlight at boot,
86 * and this may bring dimming screen after boot
87 */
88static bool use_bios_initial_backlight = 1;
89module_param(use_bios_initial_backlight, bool, 0644);
90
91/*
92 * For Windows 8 systems: if set ture and the GPU driver has 85 * For Windows 8 systems: if set ture and the GPU driver has
93 * registered a backlight interface, skip registering ACPI video's. 86 * registered a backlight interface, skip registering ACPI video's.
94 */ 87 */
@@ -406,12 +399,6 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d)
406 return 0; 399 return 0;
407} 400}
408 401
409static int video_ignore_initial_backlight(const struct dmi_system_id *d)
410{
411 use_bios_initial_backlight = 0;
412 return 0;
413}
414
415static struct dmi_system_id video_dmi_table[] __initdata = { 402static struct dmi_system_id video_dmi_table[] __initdata = {
416 /* 403 /*
417 * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 404 * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
@@ -456,54 +443,6 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
456 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), 443 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
457 }, 444 },
458 }, 445 },
459 {
460 .callback = video_ignore_initial_backlight,
461 .ident = "HP Folio 13-2000",
462 .matches = {
463 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
464 DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"),
465 },
466 },
467 {
468 .callback = video_ignore_initial_backlight,
469 .ident = "Fujitsu E753",
470 .matches = {
471 DMI_MATCH(DMI_BOARD_VENDOR, "FUJITSU"),
472 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E753"),
473 },
474 },
475 {
476 .callback = video_ignore_initial_backlight,
477 .ident = "HP Pavilion dm4",
478 .matches = {
479 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
480 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"),
481 },
482 },
483 {
484 .callback = video_ignore_initial_backlight,
485 .ident = "HP Pavilion g6 Notebook PC",
486 .matches = {
487 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
488 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
489 },
490 },
491 {
492 .callback = video_ignore_initial_backlight,
493 .ident = "HP 1000 Notebook PC",
494 .matches = {
495 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
496 DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"),
497 },
498 },
499 {
500 .callback = video_ignore_initial_backlight,
501 .ident = "HP Pavilion m4",
502 .matches = {
503 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
504 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"),
505 },
506 },
507 {} 446 {}
508}; 447};
509 448
@@ -839,20 +778,18 @@ acpi_video_init_brightness(struct acpi_video_device *device)
839 if (!device->cap._BQC) 778 if (!device->cap._BQC)
840 goto set_level; 779 goto set_level;
841 780
842 if (use_bios_initial_backlight) { 781 level = acpi_video_bqc_value_to_level(device, level_old);
843 level = acpi_video_bqc_value_to_level(device, level_old); 782 /*
844 /* 783 * On some buggy laptops, _BQC returns an uninitialized
845 * On some buggy laptops, _BQC returns an uninitialized 784 * value when invoked for the first time, i.e.
846 * value when invoked for the first time, i.e. 785 * level_old is invalid (no matter whether it's a level
847 * level_old is invalid (no matter whether it's a level 786 * or an index). Set the backlight to max_level in this case.
848 * or an index). Set the backlight to max_level in this case. 787 */
849 */ 788 for (i = 2; i < br->count; i++)
850 for (i = 2; i < br->count; i++) 789 if (level == br->levels[i])
851 if (level == br->levels[i]) 790 break;
852 break; 791 if (i == br->count || !level)
853 if (i == br->count || !level) 792 level = max_level;
854 level = max_level;
855 }
856 793
857set_level: 794set_level:
858 result = acpi_video_device_lcd_set_level(device, level); 795 result = acpi_video_device_lcd_set_level(device, level);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index e2903d03180e..14f1e9506338 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -435,6 +435,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
435 .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */ 435 .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
436 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3), 436 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
437 .driver_data = board_ahci_yes_fbs }, 437 .driver_data = board_ahci_yes_fbs },
438 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
439 .driver_data = board_ahci_yes_fbs },
438 440
439 /* Promise */ 441 /* Promise */
440 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ 442 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index f9554318504f..4b231baceb09 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -329,6 +329,7 @@ static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_suspend, ahci_resume);
329static const struct of_device_id ahci_of_match[] = { 329static const struct of_device_id ahci_of_match[] = {
330 { .compatible = "snps,spear-ahci", }, 330 { .compatible = "snps,spear-ahci", },
331 { .compatible = "snps,exynos5440-ahci", }, 331 { .compatible = "snps,exynos5440-ahci", },
332 { .compatible = "ibm,476gtr-ahci", },
332 {}, 333 {},
333}; 334};
334MODULE_DEVICE_TABLE(of, ahci_of_match); 335MODULE_DEVICE_TABLE(of, ahci_of_match);
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index ab714d2ad978..4372cfa883c9 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -185,7 +185,7 @@ void ata_acpi_bind_port(struct ata_port *ap)
185 if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA || !host_handle) 185 if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA || !host_handle)
186 return; 186 return;
187 187
188 ACPI_HANDLE_SET(&ap->tdev, acpi_get_child(host_handle, ap->port_no)); 188 acpi_preset_companion(&ap->tdev, host_handle, ap->port_no);
189 189
190 if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0) 190 if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
191 ap->pflags |= ATA_PFLAG_INIT_GTM_VALID; 191 ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
@@ -222,7 +222,7 @@ void ata_acpi_bind_dev(struct ata_device *dev)
222 parent_handle = port_handle; 222 parent_handle = port_handle;
223 } 223 }
224 224
225 ACPI_HANDLE_SET(&dev->tdev, acpi_get_child(parent_handle, adr)); 225 acpi_preset_companion(&dev->tdev, parent_handle, adr);
226 226
227 register_hotplug_dock_device(ata_dev_acpi_handle(dev), 227 register_hotplug_dock_device(ata_dev_acpi_handle(dev),
228 &ata_acpi_dev_dock_ops, dev, NULL, NULL); 228 &ata_acpi_dev_dock_ops, dev, NULL, NULL);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 81a94a3919db..75b93678bbcd 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6304,10 +6304,9 @@ static void ata_port_detach(struct ata_port *ap)
6304 for (i = 0; i < SATA_PMP_MAX_PORTS; i++) 6304 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6305 ata_tlink_delete(&ap->pmp_link[i]); 6305 ata_tlink_delete(&ap->pmp_link[i]);
6306 } 6306 }
6307 ata_tport_delete(ap);
6308
6309 /* remove the associated SCSI host */ 6307 /* remove the associated SCSI host */
6310 scsi_remove_host(ap->scsi_host); 6308 scsi_remove_host(ap->scsi_host);
6309 ata_tport_delete(ap);
6311} 6310}
6312 6311
6313/** 6312/**
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index db6dfcfa3e2e..ab58556d347c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3625,6 +3625,7 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
3625 shost->max_lun = 1; 3625 shost->max_lun = 1;
3626 shost->max_channel = 1; 3626 shost->max_channel = 1;
3627 shost->max_cmd_len = 16; 3627 shost->max_cmd_len = 16;
3628 shost->no_write_same = 1;
3628 3629
3629 /* Schedule policy is determined by ->qc_defer() 3630 /* Schedule policy is determined by ->qc_defer()
3630 * callback and it needs to see every deferred qc. 3631 * callback and it needs to see every deferred qc.
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 68f9e3293e9c..88949c6d55dd 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -88,15 +88,13 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
88static bool odd_can_poweroff(struct ata_device *ata_dev) 88static bool odd_can_poweroff(struct ata_device *ata_dev)
89{ 89{
90 acpi_handle handle; 90 acpi_handle handle;
91 acpi_status status;
92 struct acpi_device *acpi_dev; 91 struct acpi_device *acpi_dev;
93 92
94 handle = ata_dev_acpi_handle(ata_dev); 93 handle = ata_dev_acpi_handle(ata_dev);
95 if (!handle) 94 if (!handle)
96 return false; 95 return false;
97 96
98 status = acpi_bus_get_device(handle, &acpi_dev); 97 if (acpi_bus_get_device(handle, &acpi_dev))
99 if (ACPI_FAILURE(status))
100 return false; 98 return false;
101 99
102 return acpi_device_can_poweroff(acpi_dev); 100 return acpi_device_can_poweroff(acpi_dev);
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 853f610af28f..73492dd4a4bc 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -319,6 +319,7 @@ static int cf_init(struct arasan_cf_dev *acdev)
319 ret = clk_set_rate(acdev->clk, 166000000); 319 ret = clk_set_rate(acdev->clk, 166000000);
320 if (ret) { 320 if (ret) {
321 dev_warn(acdev->host->dev, "clock set rate failed"); 321 dev_warn(acdev->host->dev, "clock set rate failed");
322 clk_disable_unprepare(acdev->clk);
322 return ret; 323 return ret;
323 } 324 }
324 325
@@ -396,8 +397,7 @@ dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
396 struct dma_async_tx_descriptor *tx; 397 struct dma_async_tx_descriptor *tx;
397 struct dma_chan *chan = acdev->dma_chan; 398 struct dma_chan *chan = acdev->dma_chan;
398 dma_cookie_t cookie; 399 dma_cookie_t cookie;
399 unsigned long flags = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP | 400 unsigned long flags = DMA_PREP_INTERRUPT;
400 DMA_COMPL_SKIP_DEST_UNMAP;
401 int ret = 0; 401 int ret = 0;
402 402
403 tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags); 403 tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 272f00927761..1bdf104e90bb 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3511,7 +3511,7 @@ static int init_card(struct atm_dev *dev)
3511 tmp = dev_get_by_name(&init_net, tname); /* jhs: was "tmp = dev_get(tname);" */ 3511 tmp = dev_get_by_name(&init_net, tname); /* jhs: was "tmp = dev_get(tname);" */
3512 if (tmp) { 3512 if (tmp) {
3513 memcpy(card->atmdev->esi, tmp->dev_addr, 6); 3513 memcpy(card->atmdev->esi, tmp->dev_addr, 6);
3514 3514 dev_put(tmp);
3515 printk("%s: ESI %pM\n", card->name, card->atmdev->esi); 3515 printk("%s: ESI %pM\n", card->name, card->atmdev->esi);
3516 } 3516 }
3517 /* 3517 /*
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 47051cd25113..3a94b799f166 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -432,7 +432,7 @@ struct platform_device *platform_device_register_full(
432 goto err_alloc; 432 goto err_alloc;
433 433
434 pdev->dev.parent = pdevinfo->parent; 434 pdev->dev.parent = pdevinfo->parent;
435 ACPI_HANDLE_SET(&pdev->dev, pdevinfo->acpi_node.handle); 435 ACPI_COMPANION_SET(&pdev->dev, pdevinfo->acpi_node.companion);
436 436
437 if (pdevinfo->dma_mask) { 437 if (pdevinfo->dma_mask) {
438 /* 438 /*
@@ -463,7 +463,7 @@ struct platform_device *platform_device_register_full(
463 ret = platform_device_add(pdev); 463 ret = platform_device_add(pdev);
464 if (ret) { 464 if (ret) {
465err: 465err:
466 ACPI_HANDLE_SET(&pdev->dev, NULL); 466 ACPI_COMPANION_SET(&pdev->dev, NULL);
467 kfree(pdev->dev.dma_mask); 467 kfree(pdev->dev.dma_mask);
468 468
469err_alloc: 469err_alloc:
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c12e9b9556be..1b41fca3d65a 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1350,6 +1350,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
1350 1350
1351 device_unlock(dev); 1351 device_unlock(dev);
1352 1352
1353 if (error)
1354 pm_runtime_put(dev);
1355
1353 return error; 1356 return error;
1354} 1357}
1355 1358
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 98745dd77e8c..81f977510775 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -40,7 +40,7 @@ static int regmap_mmio_gather_write(void *context,
40 40
41 BUG_ON(reg_size != 4); 41 BUG_ON(reg_size != 4);
42 42
43 if (ctx->clk) { 43 if (!IS_ERR(ctx->clk)) {
44 ret = clk_enable(ctx->clk); 44 ret = clk_enable(ctx->clk);
45 if (ret < 0) 45 if (ret < 0)
46 return ret; 46 return ret;
@@ -73,7 +73,7 @@ static int regmap_mmio_gather_write(void *context,
73 offset += ctx->val_bytes; 73 offset += ctx->val_bytes;
74 } 74 }
75 75
76 if (ctx->clk) 76 if (!IS_ERR(ctx->clk))
77 clk_disable(ctx->clk); 77 clk_disable(ctx->clk);
78 78
79 return 0; 79 return 0;
@@ -96,7 +96,7 @@ static int regmap_mmio_read(void *context,
96 96
97 BUG_ON(reg_size != 4); 97 BUG_ON(reg_size != 4);
98 98
99 if (ctx->clk) { 99 if (!IS_ERR(ctx->clk)) {
100 ret = clk_enable(ctx->clk); 100 ret = clk_enable(ctx->clk);
101 if (ret < 0) 101 if (ret < 0)
102 return ret; 102 return ret;
@@ -129,7 +129,7 @@ static int regmap_mmio_read(void *context,
129 offset += ctx->val_bytes; 129 offset += ctx->val_bytes;
130 } 130 }
131 131
132 if (ctx->clk) 132 if (!IS_ERR(ctx->clk))
133 clk_disable(ctx->clk); 133 clk_disable(ctx->clk);
134 134
135 return 0; 135 return 0;
@@ -139,7 +139,7 @@ static void regmap_mmio_free_context(void *context)
139{ 139{
140 struct regmap_mmio_context *ctx = context; 140 struct regmap_mmio_context *ctx = context;
141 141
142 if (ctx->clk) { 142 if (!IS_ERR(ctx->clk)) {
143 clk_unprepare(ctx->clk); 143 clk_unprepare(ctx->clk);
144 clk_put(ctx->clk); 144 clk_put(ctx->clk);
145 } 145 }
@@ -209,6 +209,7 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
209 209
210 ctx->regs = regs; 210 ctx->regs = regs;
211 ctx->val_bytes = config->val_bits / 8; 211 ctx->val_bytes = config->val_bits / 8;
212 ctx->clk = ERR_PTR(-ENODEV);
212 213
213 if (clk_id == NULL) 214 if (clk_id == NULL)
214 return ctx; 215 return ctx;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 9c021d9cace0..c2e002100949 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1549,7 +1549,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
1549 val + (i * val_bytes), 1549 val + (i * val_bytes),
1550 val_bytes); 1550 val_bytes);
1551 if (ret != 0) 1551 if (ret != 0)
1552 return ret; 1552 goto out;
1553 } 1553 }
1554 } else { 1554 } else {
1555 ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count); 1555 ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
@@ -1743,7 +1743,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
1743/** 1743/**
1744 * regmap_read(): Read a value from a single register 1744 * regmap_read(): Read a value from a single register
1745 * 1745 *
1746 * @map: Register map to write to 1746 * @map: Register map to read from
1747 * @reg: Register to be read from 1747 * @reg: Register to be read from
1748 * @val: Pointer to store read value 1748 * @val: Pointer to store read value
1749 * 1749 *
@@ -1770,7 +1770,7 @@ EXPORT_SYMBOL_GPL(regmap_read);
1770/** 1770/**
1771 * regmap_raw_read(): Read raw data from the device 1771 * regmap_raw_read(): Read raw data from the device
1772 * 1772 *
1773 * @map: Register map to write to 1773 * @map: Register map to read from
1774 * @reg: First register to be read from 1774 * @reg: First register to be read from
1775 * @val: Pointer to store read value 1775 * @val: Pointer to store read value
1776 * @val_len: Size of data to read 1776 * @val_len: Size of data to read
@@ -1882,7 +1882,7 @@ EXPORT_SYMBOL_GPL(regmap_fields_read);
1882/** 1882/**
1883 * regmap_bulk_read(): Read multiple registers from the device 1883 * regmap_bulk_read(): Read multiple registers from the device
1884 * 1884 *
1885 * @map: Register map to write to 1885 * @map: Register map to read from
1886 * @reg: First register to be read from 1886 * @reg: First register to be read from
1887 * @val: Pointer to store read value, in native register size for device 1887 * @val: Pointer to store read value, in native register size for device
1888 * @val_count: Number of registers to read 1888 * @val_count: Number of registers to read
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index b5d842370cc9..f370fc13aea5 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -223,7 +223,7 @@ static void null_softirq_done_fn(struct request *rq)
223 blk_end_request_all(rq, 0); 223 blk_end_request_all(rq, 0);
224} 224}
225 225
226#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS) 226#ifdef CONFIG_SMP
227 227
228static void null_ipi_cmd_end_io(void *data) 228static void null_ipi_cmd_end_io(void *data)
229{ 229{
@@ -260,7 +260,7 @@ static void null_cmd_end_ipi(struct nullb_cmd *cmd)
260 put_cpu(); 260 put_cpu();
261} 261}
262 262
263#endif /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */ 263#endif /* CONFIG_SMP */
264 264
265static inline void null_handle_cmd(struct nullb_cmd *cmd) 265static inline void null_handle_cmd(struct nullb_cmd *cmd)
266{ 266{
@@ -270,7 +270,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd)
270 end_cmd(cmd); 270 end_cmd(cmd);
271 break; 271 break;
272 case NULL_IRQ_SOFTIRQ: 272 case NULL_IRQ_SOFTIRQ:
273#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS) 273#ifdef CONFIG_SMP
274 null_cmd_end_ipi(cmd); 274 null_cmd_end_ipi(cmd);
275#else 275#else
276 end_cmd(cmd); 276 end_cmd(cmd);
@@ -495,23 +495,23 @@ static int null_add_dev(void)
495 495
496 spin_lock_init(&nullb->lock); 496 spin_lock_init(&nullb->lock);
497 497
498 if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
499 submit_queues = nr_online_nodes;
500
498 if (setup_queues(nullb)) 501 if (setup_queues(nullb))
499 goto err; 502 goto err;
500 503
501 if (queue_mode == NULL_Q_MQ) { 504 if (queue_mode == NULL_Q_MQ) {
502 null_mq_reg.numa_node = home_node; 505 null_mq_reg.numa_node = home_node;
503 null_mq_reg.queue_depth = hw_queue_depth; 506 null_mq_reg.queue_depth = hw_queue_depth;
507 null_mq_reg.nr_hw_queues = submit_queues;
504 508
505 if (use_per_node_hctx) { 509 if (use_per_node_hctx) {
506 null_mq_reg.ops->alloc_hctx = null_alloc_hctx; 510 null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
507 null_mq_reg.ops->free_hctx = null_free_hctx; 511 null_mq_reg.ops->free_hctx = null_free_hctx;
508
509 null_mq_reg.nr_hw_queues = nr_online_nodes;
510 } else { 512 } else {
511 null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue; 513 null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
512 null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue; 514 null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;
513
514 null_mq_reg.nr_hw_queues = submit_queues;
515 } 515 }
516 516
517 nullb->q = blk_mq_init_queue(&null_mq_reg, nullb); 517 nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
@@ -571,7 +571,7 @@ static int __init null_init(void)
571{ 571{
572 unsigned int i; 572 unsigned int i;
573 573
574#if !defined(CONFIG_SMP) || !defined(CONFIG_USE_GENERIC_SMP_HELPERS) 574#if !defined(CONFIG_SMP)
575 if (irqmode == NULL_IRQ_SOFTIRQ) { 575 if (irqmode == NULL_IRQ_SOFTIRQ) {
576 pr_warn("null_blk: softirq completions not available.\n"); 576 pr_warn("null_blk: softirq completions not available.\n");
577 pr_warn("null_blk: using direct completions.\n"); 577 pr_warn("null_blk: using direct completions.\n");
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 588479d58f52..6a680d4de7f1 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -199,15 +199,16 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
199 199
200 spin_lock_irqsave(&vblk->vq_lock, flags); 200 spin_lock_irqsave(&vblk->vq_lock, flags);
201 if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) { 201 if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) {
202 virtqueue_kick(vblk->vq);
202 spin_unlock_irqrestore(&vblk->vq_lock, flags); 203 spin_unlock_irqrestore(&vblk->vq_lock, flags);
203 blk_mq_stop_hw_queue(hctx); 204 blk_mq_stop_hw_queue(hctx);
204 virtqueue_kick(vblk->vq);
205 return BLK_MQ_RQ_QUEUE_BUSY; 205 return BLK_MQ_RQ_QUEUE_BUSY;
206 } 206 }
207 spin_unlock_irqrestore(&vblk->vq_lock, flags);
208 207
209 if (last) 208 if (last)
210 virtqueue_kick(vblk->vq); 209 virtqueue_kick(vblk->vq);
210
211 spin_unlock_irqrestore(&vblk->vq_lock, flags);
211 return BLK_MQ_RQ_QUEUE_OK; 212 return BLK_MQ_RQ_QUEUE_OK;
212} 213}
213 214
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 432db1b59b00..c4a4c9006288 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -489,7 +489,7 @@ static int blkif_queue_request(struct request *req)
489 489
490 if ((ring_req->operation == BLKIF_OP_INDIRECT) && 490 if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
491 (i % SEGS_PER_INDIRECT_FRAME == 0)) { 491 (i % SEGS_PER_INDIRECT_FRAME == 0)) {
492 unsigned long pfn; 492 unsigned long uninitialized_var(pfn);
493 493
494 if (segments) 494 if (segments)
495 kunmap_atomic(segments); 495 kunmap_atomic(segments);
@@ -2011,6 +2011,10 @@ static void blkif_release(struct gendisk *disk, fmode_t mode)
2011 2011
2012 bdev = bdget_disk(disk, 0); 2012 bdev = bdget_disk(disk, 0);
2013 2013
2014 if (!bdev) {
2015 WARN(1, "Block device %s yanked out from us!\n", disk->disk_name);
2016 goto out_mutex;
2017 }
2014 if (bdev->bd_openers) 2018 if (bdev->bd_openers)
2015 goto out; 2019 goto out;
2016 2020
@@ -2041,6 +2045,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode)
2041 2045
2042out: 2046out:
2043 bdput(bdev); 2047 bdput(bdev);
2048out_mutex:
2044 mutex_unlock(&blkfront_mutex); 2049 mutex_unlock(&blkfront_mutex);
2045} 2050}
2046 2051
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index c206de2951f2..2f2b08457c67 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -165,6 +165,19 @@ config HW_RANDOM_OMAP
165 165
166 If unsure, say Y. 166 If unsure, say Y.
167 167
168config HW_RANDOM_OMAP3_ROM
169 tristate "OMAP3 ROM Random Number Generator support"
170 depends on HW_RANDOM && ARCH_OMAP3
171 default HW_RANDOM
172 ---help---
173 This driver provides kernel-side support for the Random Number
174 Generator hardware found on OMAP34xx processors.
175
176 To compile this driver as a module, choose M here: the
177 module will be called omap3-rom-rng.
178
179 If unsure, say Y.
180
168config HW_RANDOM_OCTEON 181config HW_RANDOM_OCTEON
169 tristate "Octeon Random Number Generator support" 182 tristate "Octeon Random Number Generator support"
170 depends on HW_RANDOM && CAVIUM_OCTEON_SOC 183 depends on HW_RANDOM && CAVIUM_OCTEON_SOC
@@ -327,3 +340,15 @@ config HW_RANDOM_TPM
327 module will be called tpm-rng. 340 module will be called tpm-rng.
328 341
329 If unsure, say Y. 342 If unsure, say Y.
343
344config HW_RANDOM_MSM
345 tristate "Qualcomm MSM Random Number Generator support"
346 depends on HW_RANDOM && ARCH_MSM
347 ---help---
348 This driver provides kernel-side support for the Random Number
349 Generator hardware found on Qualcomm MSM SoCs.
350
 351 To compile this driver as a module, choose M here: the
352 module will be called msm-rng.
353
354 If unsure, say Y.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index d7d2435ff7fa..3ae7755a52e7 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -15,6 +15,7 @@ n2-rng-y := n2-drv.o n2-asm.o
15obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o 15obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
16obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o 16obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
17obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o 17obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
18obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o
18obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o 19obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
19obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o 20obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
20obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o 21obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
@@ -28,3 +29,4 @@ obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
28obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o 29obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
29obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o 30obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
30obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o 31obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
32obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c
new file mode 100644
index 000000000000..148521e51dc6
--- /dev/null
+++ b/drivers/char/hw_random/msm-rng.c
@@ -0,0 +1,197 @@
1/*
2 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/clk.h>
15#include <linux/err.h>
16#include <linux/hw_random.h>
17#include <linux/io.h>
18#include <linux/module.h>
19#include <linux/of.h>
20#include <linux/platform_device.h>
21
/* Device specific register offsets */
#define PRNG_DATA_OUT		0x0000	/* random words are read from here */
#define PRNG_STATUS		0x0004	/* data-available status */
#define PRNG_LFSR_CFG		0x0100	/* LFSR clocking configuration */
#define PRNG_CONFIG		0x0104	/* global block enable */

/* Device specific register masks and config values */
#define PRNG_LFSR_CFG_MASK	0x0000ffff	/* writable bits of PRNG_LFSR_CFG */
#define PRNG_LFSR_CFG_CLOCKS	0x0000dddd	/* value programmed before enabling */
#define PRNG_CONFIG_HW_ENABLE	BIT(1)		/* turns the PRNG block on */
#define PRNG_STATUS_DATA_AVAIL	BIT(0)		/* set when a word can be read */

#define MAX_HW_FIFO_DEPTH 16			/* hardware FIFO depth, in words */
#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4)	/* FIFO size in bytes */
#define WORD_SZ 4				/* bytes per FIFO word */

/* Per-device state: register window, core clock and hwrng glue. */
struct msm_rng {
	void __iomem *base;	/* mapped PRNG register block */
	struct clk *clk;	/* "core" clock, gated on around register access */
	struct hwrng hwrng;	/* embedded hwrng registration record */
};

/* Recover the struct msm_rng from its embedded hwrng member. */
#define to_msm_rng(p)	container_of(p, struct msm_rng, hwrng)
45
46static int msm_rng_enable(struct hwrng *hwrng, int enable)
47{
48 struct msm_rng *rng = to_msm_rng(hwrng);
49 u32 val;
50 int ret;
51
52 ret = clk_prepare_enable(rng->clk);
53 if (ret)
54 return ret;
55
56 if (enable) {
57 /* Enable PRNG only if it is not already enabled */
58 val = readl_relaxed(rng->base + PRNG_CONFIG);
59 if (val & PRNG_CONFIG_HW_ENABLE)
60 goto already_enabled;
61
62 val = readl_relaxed(rng->base + PRNG_LFSR_CFG);
63 val &= ~PRNG_LFSR_CFG_MASK;
64 val |= PRNG_LFSR_CFG_CLOCKS;
65 writel(val, rng->base + PRNG_LFSR_CFG);
66
67 val = readl_relaxed(rng->base + PRNG_CONFIG);
68 val |= PRNG_CONFIG_HW_ENABLE;
69 writel(val, rng->base + PRNG_CONFIG);
70 } else {
71 val = readl_relaxed(rng->base + PRNG_CONFIG);
72 val &= ~PRNG_CONFIG_HW_ENABLE;
73 writel(val, rng->base + PRNG_CONFIG);
74 }
75
76already_enabled:
77 clk_disable_unprepare(rng->clk);
78 return 0;
79}
80
81static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
82{
83 struct msm_rng *rng = to_msm_rng(hwrng);
84 size_t currsize = 0;
85 u32 *retdata = data;
86 size_t maxsize;
87 int ret;
88 u32 val;
89
90 /* calculate max size bytes to transfer back to caller */
91 maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max);
92
93 /* no room for word data */
94 if (maxsize < WORD_SZ)
95 return 0;
96
97 ret = clk_prepare_enable(rng->clk);
98 if (ret)
99 return ret;
100
101 /* read random data from hardware */
102 do {
103 val = readl_relaxed(rng->base + PRNG_STATUS);
104 if (!(val & PRNG_STATUS_DATA_AVAIL))
105 break;
106
107 val = readl_relaxed(rng->base + PRNG_DATA_OUT);
108 if (!val)
109 break;
110
111 *retdata++ = val;
112 currsize += WORD_SZ;
113
114 /* make sure we stay on 32bit boundary */
115 if ((maxsize - currsize) < WORD_SZ)
116 break;
117 } while (currsize < maxsize);
118
119 clk_disable_unprepare(rng->clk);
120
121 return currsize;
122}
123
/* hwrng ->init hook: switch the PRNG hardware on. */
static int msm_rng_init(struct hwrng *hwrng)
{
	return msm_rng_enable(hwrng, 1);
}
128
/* hwrng ->cleanup hook: switch the PRNG hardware back off. */
static void msm_rng_cleanup(struct hwrng *hwrng)
{
	msm_rng_enable(hwrng, 0);
}
133
134static int msm_rng_probe(struct platform_device *pdev)
135{
136 struct resource *res;
137 struct msm_rng *rng;
138 int ret;
139
140 rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
141 if (!rng)
142 return -ENOMEM;
143
144 platform_set_drvdata(pdev, rng);
145
146 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
147 rng->base = devm_ioremap_resource(&pdev->dev, res);
148 if (IS_ERR(rng->base))
149 return PTR_ERR(rng->base);
150
151 rng->clk = devm_clk_get(&pdev->dev, "core");
152 if (IS_ERR(rng->clk))
153 return PTR_ERR(rng->clk);
154
155 rng->hwrng.name = KBUILD_MODNAME,
156 rng->hwrng.init = msm_rng_init,
157 rng->hwrng.cleanup = msm_rng_cleanup,
158 rng->hwrng.read = msm_rng_read,
159
160 ret = hwrng_register(&rng->hwrng);
161 if (ret) {
162 dev_err(&pdev->dev, "failed to register hwrng\n");
163 return ret;
164 }
165
166 return 0;
167}
168
/* Unregister from the hwrng core; devm releases the remaining resources. */
static int msm_rng_remove(struct platform_device *pdev)
{
	struct msm_rng *rng = platform_get_drvdata(pdev);

	hwrng_unregister(&rng->hwrng);
	return 0;
}
176
/* Match the Qualcomm PRNG node in the device tree. */
static const struct of_device_id msm_rng_of_match[] = {
	{ .compatible = "qcom,prng", },
	{}
};
MODULE_DEVICE_TABLE(of, msm_rng_of_match);

static struct platform_driver msm_rng_driver = {
	.probe = msm_rng_probe,
	.remove = msm_rng_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(msm_rng_of_match),
	}
};
module_platform_driver(msm_rng_driver);

MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");
MODULE_DESCRIPTION("Qualcomm MSM random number generator driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
new file mode 100644
index 000000000000..c853e9e68573
--- /dev/null
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -0,0 +1,141 @@
1/*
2 * omap3-rom-rng.c - RNG driver for TI OMAP3 CPU family
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Juha Yrjola <juha.yrjola@solidboot.com>
6 *
7 * Copyright (C) 2013 Pali Rohár <pali.rohar@gmail.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/random.h>
19#include <linux/hw_random.h>
20#include <linux/timer.h>
21#include <linux/clk.h>
22#include <linux/err.h>
23#include <linux/platform_device.h>
24
/* Flag values for the third argument of the ROM RNG service call. */
#define RNG_RESET			0x01	/* put the RNG back into reset */
#define RNG_GEN_PRNG_HW_INIT		0x02	/* (re)initialize the PRNG HW */
#define RNG_GEN_HW			0x08	/* generate random data */

/* param1: ptr, param2: count, param3: flag */
static u32 (*omap3_rom_rng_call)(u32, u32, u32);

/* Re-arms on every read; fires after 500 ms of inactivity (see get_random). */
static struct timer_list idle_timer;
static int rng_idle;		/* non-zero when the HW is reset and clock off */
static struct clk *rng_clk;	/* "ick" interface clock for the RNG block */
35
/*
 * Idle-timer callback: put the RNG back into reset and release its clock.
 * Also called directly (with the clock enabled) from probe to leave the
 * hardware in the reset state.
 */
static void omap3_rom_rng_idle(unsigned long data)
{
	int r;

	r = omap3_rom_rng_call(0, 0, RNG_RESET);
	if (r != 0) {
		/*
		 * NOTE(review): on a failed reset we return with the clock
		 * still enabled and rng_idle still clear — presumably so the
		 * hardware stays usable for a retry; confirm intentional.
		 */
		pr_err("reset failed: %d\n", r);
		return;
	}
	clk_disable_unprepare(rng_clk);
	rng_idle = 1;
}
48
49static int omap3_rom_rng_get_random(void *buf, unsigned int count)
50{
51 u32 r;
52 u32 ptr;
53
54 del_timer_sync(&idle_timer);
55 if (rng_idle) {
56 clk_prepare_enable(rng_clk);
57 r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
58 if (r != 0) {
59 clk_disable_unprepare(rng_clk);
60 pr_err("HW init failed: %d\n", r);
61 return -EIO;
62 }
63 rng_idle = 0;
64 }
65
66 ptr = virt_to_phys(buf);
67 r = omap3_rom_rng_call(ptr, count, RNG_GEN_HW);
68 mod_timer(&idle_timer, jiffies + msecs_to_jiffies(500));
69 if (r != 0)
70 return -EINVAL;
71 return 0;
72}
73
/*
 * hwrng ->data_present hook: data is produced on demand by ->data_read,
 * so always report it as available.
 */
static int omap3_rom_rng_data_present(struct hwrng *rng, int wait)
{
	return 1;
}
78
79static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data)
80{
81 int r;
82
83 r = omap3_rom_rng_get_random(data, 4);
84 if (r < 0)
85 return r;
86 return 4;
87}
88
/* Legacy hwrng interface: one 32-bit word per ->data_read call. */
static struct hwrng omap3_rom_rng_ops = {
	.name = "omap3-rom",
	.data_present = omap3_rom_rng_data_present,
	.data_read = omap3_rom_rng_data_read,
};
94
95static int omap3_rom_rng_probe(struct platform_device *pdev)
96{
97 pr_info("initializing\n");
98
99 omap3_rom_rng_call = pdev->dev.platform_data;
100 if (!omap3_rom_rng_call) {
101 pr_err("omap3_rom_rng_call is NULL\n");
102 return -EINVAL;
103 }
104
105 setup_timer(&idle_timer, omap3_rom_rng_idle, 0);
106 rng_clk = clk_get(&pdev->dev, "ick");
107 if (IS_ERR(rng_clk)) {
108 pr_err("unable to get RNG clock\n");
109 return PTR_ERR(rng_clk);
110 }
111
112 /* Leave the RNG in reset state. */
113 clk_prepare_enable(rng_clk);
114 omap3_rom_rng_idle(0);
115
116 return hwrng_register(&omap3_rom_rng_ops);
117}
118
119static int omap3_rom_rng_remove(struct platform_device *pdev)
120{
121 hwrng_unregister(&omap3_rom_rng_ops);
122 clk_disable_unprepare(rng_clk);
123 clk_put(rng_clk);
124 return 0;
125}
126
/* Bound by board code registering an "omap3-rom-rng" platform device. */
static struct platform_driver omap3_rom_rng_driver = {
	.driver = {
		.name = "omap3-rom-rng",
		.owner = THIS_MODULE,
	},
	.probe = omap3_rom_rng_probe,
	.remove = omap3_rom_rng_remove,
};

module_platform_driver(omap3_rom_rng_driver);

MODULE_ALIAS("platform:omap3-rom-rng");
MODULE_AUTHOR("Juha Yrjola");
MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index b761459a3436..ab7ffdec0ec3 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -24,7 +24,6 @@
24#include <linux/hw_random.h> 24#include <linux/hw_random.h>
25#include <asm/vio.h> 25#include <asm/vio.h>
26 26
27#define MODULE_NAME "pseries-rng"
28 27
29static int pseries_rng_data_read(struct hwrng *rng, u32 *data) 28static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
30{ 29{
@@ -55,7 +54,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
55}; 54};
56 55
57static struct hwrng pseries_rng = { 56static struct hwrng pseries_rng = {
58 .name = MODULE_NAME, 57 .name = KBUILD_MODNAME,
59 .data_read = pseries_rng_data_read, 58 .data_read = pseries_rng_data_read,
60}; 59};
61 60
@@ -78,7 +77,7 @@ static struct vio_device_id pseries_rng_driver_ids[] = {
78MODULE_DEVICE_TABLE(vio, pseries_rng_driver_ids); 77MODULE_DEVICE_TABLE(vio, pseries_rng_driver_ids);
79 78
80static struct vio_driver pseries_rng_driver = { 79static struct vio_driver pseries_rng_driver = {
81 .name = MODULE_NAME, 80 .name = KBUILD_MODNAME,
82 .probe = pseries_rng_probe, 81 .probe = pseries_rng_probe,
83 .remove = pseries_rng_remove, 82 .remove = pseries_rng_remove,
84 .get_desired_dma = pseries_rng_get_desired_dma, 83 .get_desired_dma = pseries_rng_get_desired_dma,
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index e737772ad69a..de5a6dcfb3e2 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -221,7 +221,7 @@ static void __exit mod_exit(void)
221module_init(mod_init); 221module_init(mod_init);
222module_exit(mod_exit); 222module_exit(mod_exit);
223 223
224static struct x86_cpu_id via_rng_cpu_id[] = { 224static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = {
225 X86_FEATURE_MATCH(X86_FEATURE_XSTORE), 225 X86_FEATURE_MATCH(X86_FEATURE_XSTORE),
226 {} 226 {}
227}; 227};
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 40cc0cf2ded6..e6939e13e338 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -664,6 +664,13 @@ static struct dmi_system_id __initdata i8k_dmi_table[] = {
664 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro"), 664 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro"),
665 }, 665 },
666 }, 666 },
667 {
668 .ident = "Dell XPS421",
669 .matches = {
670 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
671 DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"),
672 },
673 },
667 { } 674 { }
668}; 675};
669 676
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 94c0c74434ea..1a65838888cd 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -33,6 +33,15 @@ config TCG_TIS
33 from within Linux. To compile this driver as a module, choose 33 from within Linux. To compile this driver as a module, choose
34 M here; the module will be called tpm_tis. 34 M here; the module will be called tpm_tis.
35 35
36config TCG_TIS_I2C_ATMEL
37 tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)"
38 depends on I2C
39 ---help---
40 If you have an Atmel I2C TPM security chip say Yes and it will be
41 accessible from within Linux.
42 To compile this driver as a module, choose M here; the module will
43 be called tpm_tis_i2c_atmel.
44
36config TCG_TIS_I2C_INFINEON 45config TCG_TIS_I2C_INFINEON
37 tristate "TPM Interface Specification 1.2 Interface (I2C - Infineon)" 46 tristate "TPM Interface Specification 1.2 Interface (I2C - Infineon)"
38 depends on I2C 47 depends on I2C
@@ -42,7 +51,17 @@ config TCG_TIS_I2C_INFINEON
42 Specification 0.20 say Yes and it will be accessible from within 51 Specification 0.20 say Yes and it will be accessible from within
43 Linux. 52 Linux.
44 To compile this driver as a module, choose M here; the module 53 To compile this driver as a module, choose M here; the module
45 will be called tpm_tis_i2c_infineon. 54 will be called tpm_i2c_infineon.
55
56config TCG_TIS_I2C_NUVOTON
57 tristate "TPM Interface Specification 1.2 Interface (I2C - Nuvoton)"
58 depends on I2C
59 ---help---
60 If you have a TPM security chip with an I2C interface from
61 Nuvoton Technology Corp. say Yes and it will be accessible
62 from within Linux.
63 To compile this driver as a module, choose M here; the module
64 will be called tpm_i2c_nuvoton.
46 65
47config TCG_NSC 66config TCG_NSC
48 tristate "National Semiconductor TPM Interface" 67 tristate "National Semiconductor TPM Interface"
@@ -82,14 +101,14 @@ config TCG_IBMVTPM
82 as a module, choose M here; the module will be called tpm_ibmvtpm. 101 as a module, choose M here; the module will be called tpm_ibmvtpm.
83 102
84config TCG_ST33_I2C 103config TCG_ST33_I2C
85 tristate "STMicroelectronics ST33 I2C TPM" 104 tristate "STMicroelectronics ST33 I2C TPM"
86 depends on I2C 105 depends on I2C
87 depends on GPIOLIB 106 depends on GPIOLIB
88 ---help--- 107 ---help---
89 If you have a TPM security chip from STMicroelectronics working with 108 If you have a TPM security chip from STMicroelectronics working with
90 an I2C bus say Yes and it will be accessible from within Linux. 109 an I2C bus say Yes and it will be accessible from within Linux.
91 To compile this driver as a module, choose M here; the module will be 110 To compile this driver as a module, choose M here; the module will be
92 called tpm_stm_st33_i2c. 111 called tpm_stm_st33_i2c.
93 112
94config TCG_XEN 113config TCG_XEN
95 tristate "XEN TPM Interface" 114 tristate "XEN TPM Interface"
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index eb41ff97d0ad..b80a4000daee 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -2,17 +2,20 @@
2# Makefile for the kernel tpm device drivers. 2# Makefile for the kernel tpm device drivers.
3# 3#
4obj-$(CONFIG_TCG_TPM) += tpm.o 4obj-$(CONFIG_TCG_TPM) += tpm.o
5tpm-y := tpm-interface.o
6tpm-$(CONFIG_ACPI) += tpm_ppi.o
7
5ifdef CONFIG_ACPI 8ifdef CONFIG_ACPI
6 obj-$(CONFIG_TCG_TPM) += tpm_bios.o 9 tpm-y += tpm_eventlog.o tpm_acpi.o
7 tpm_bios-objs += tpm_eventlog.o tpm_acpi.o tpm_ppi.o
8else 10else
9ifdef CONFIG_TCG_IBMVTPM 11ifdef CONFIG_TCG_IBMVTPM
10 obj-$(CONFIG_TCG_TPM) += tpm_bios.o 12 tpm-y += tpm_eventlog.o tpm_of.o
11 tpm_bios-objs += tpm_eventlog.o tpm_of.o
12endif 13endif
13endif 14endif
14obj-$(CONFIG_TCG_TIS) += tpm_tis.o 15obj-$(CONFIG_TCG_TIS) += tpm_tis.o
16obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
15obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o 17obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
18obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
16obj-$(CONFIG_TCG_NSC) += tpm_nsc.o 19obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
17obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o 20obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
18obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o 21obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm-interface.c
index e3c974a6c522..6ae41d337630 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -10,13 +10,13 @@
10 * Maintained by: <tpmdd-devel@lists.sourceforge.net> 10 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
11 * 11 *
12 * Device driver for TCG/TCPA TPM (trusted platform module). 12 * Device driver for TCG/TCPA TPM (trusted platform module).
13 * Specifications at www.trustedcomputinggroup.org 13 * Specifications at www.trustedcomputinggroup.org
14 * 14 *
15 * This program is free software; you can redistribute it and/or 15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as 16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation, version 2 of the 17 * published by the Free Software Foundation, version 2 of the
18 * License. 18 * License.
19 * 19 *
20 * Note, the TPM chip is not interrupt driven (only polling) 20 * Note, the TPM chip is not interrupt driven (only polling)
21 * and can have very long timeouts (minutes!). Hence the unusual 21 * and can have very long timeouts (minutes!). Hence the unusual
22 * calls to msleep. 22 * calls to msleep.
@@ -371,13 +371,14 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
371 return -ENODATA; 371 return -ENODATA;
372 if (count > bufsiz) { 372 if (count > bufsiz) {
373 dev_err(chip->dev, 373 dev_err(chip->dev,
374 "invalid count value %x %zx \n", count, bufsiz); 374 "invalid count value %x %zx\n", count, bufsiz);
375 return -E2BIG; 375 return -E2BIG;
376 } 376 }
377 377
378 mutex_lock(&chip->tpm_mutex); 378 mutex_lock(&chip->tpm_mutex);
379 379
380 if ((rc = chip->vendor.send(chip, (u8 *) buf, count)) < 0) { 380 rc = chip->vendor.send(chip, (u8 *) buf, count);
381 if (rc < 0) {
381 dev_err(chip->dev, 382 dev_err(chip->dev,
382 "tpm_transmit: tpm_send: error %zd\n", rc); 383 "tpm_transmit: tpm_send: error %zd\n", rc);
383 goto out; 384 goto out;
@@ -444,7 +445,7 @@ static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
444{ 445{
445 int err; 446 int err;
446 447
447 len = tpm_transmit(chip,(u8 *) cmd, len); 448 len = tpm_transmit(chip, (u8 *) cmd, len);
448 if (len < 0) 449 if (len < 0)
449 return len; 450 return len;
450 else if (len < TPM_HEADER_SIZE) 451 else if (len < TPM_HEADER_SIZE)
@@ -658,7 +659,7 @@ static int tpm_continue_selftest(struct tpm_chip *chip)
658 return rc; 659 return rc;
659} 660}
660 661
661ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr, 662ssize_t tpm_show_enabled(struct device *dev, struct device_attribute *attr,
662 char *buf) 663 char *buf)
663{ 664{
664 cap_t cap; 665 cap_t cap;
@@ -674,7 +675,7 @@ ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr,
674} 675}
675EXPORT_SYMBOL_GPL(tpm_show_enabled); 676EXPORT_SYMBOL_GPL(tpm_show_enabled);
676 677
677ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr, 678ssize_t tpm_show_active(struct device *dev, struct device_attribute *attr,
678 char *buf) 679 char *buf)
679{ 680{
680 cap_t cap; 681 cap_t cap;
@@ -690,7 +691,7 @@ ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr,
690} 691}
691EXPORT_SYMBOL_GPL(tpm_show_active); 692EXPORT_SYMBOL_GPL(tpm_show_active);
692 693
693ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr, 694ssize_t tpm_show_owned(struct device *dev, struct device_attribute *attr,
694 char *buf) 695 char *buf)
695{ 696{
696 cap_t cap; 697 cap_t cap;
@@ -706,8 +707,8 @@ ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr,
706} 707}
707EXPORT_SYMBOL_GPL(tpm_show_owned); 708EXPORT_SYMBOL_GPL(tpm_show_owned);
708 709
709ssize_t tpm_show_temp_deactivated(struct device * dev, 710ssize_t tpm_show_temp_deactivated(struct device *dev,
710 struct device_attribute * attr, char *buf) 711 struct device_attribute *attr, char *buf)
711{ 712{
712 cap_t cap; 713 cap_t cap;
713 ssize_t rc; 714 ssize_t rc;
@@ -769,10 +770,10 @@ static int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
769 770
770/** 771/**
771 * tpm_pcr_read - read a pcr value 772 * tpm_pcr_read - read a pcr value
772 * @chip_num: tpm idx # or ANY 773 * @chip_num: tpm idx # or ANY
773 * @pcr_idx: pcr idx to retrieve 774 * @pcr_idx: pcr idx to retrieve
774 * @res_buf: TPM_PCR value 775 * @res_buf: TPM_PCR value
775 * size of res_buf is 20 bytes (or NULL if you don't care) 776 * size of res_buf is 20 bytes (or NULL if you don't care)
776 * 777 *
777 * The TPM driver should be built-in, but for whatever reason it 778 * The TPM driver should be built-in, but for whatever reason it
778 * isn't, protect against the chip disappearing, by incrementing 779 * isn't, protect against the chip disappearing, by incrementing
@@ -794,9 +795,9 @@ EXPORT_SYMBOL_GPL(tpm_pcr_read);
794 795
795/** 796/**
796 * tpm_pcr_extend - extend pcr value with hash 797 * tpm_pcr_extend - extend pcr value with hash
797 * @chip_num: tpm idx # or AN& 798 * @chip_num: tpm idx # or AN&
798 * @pcr_idx: pcr idx to extend 799 * @pcr_idx: pcr idx to extend
799 * @hash: hash value used to extend pcr value 800 * @hash: hash value used to extend pcr value
800 * 801 *
801 * The TPM driver should be built-in, but for whatever reason it 802 * The TPM driver should be built-in, but for whatever reason it
802 * isn't, protect against the chip disappearing, by incrementing 803 * isn't, protect against the chip disappearing, by incrementing
@@ -847,8 +848,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
847 unsigned long duration; 848 unsigned long duration;
848 struct tpm_cmd_t cmd; 849 struct tpm_cmd_t cmd;
849 850
850 duration = tpm_calc_ordinal_duration(chip, 851 duration = tpm_calc_ordinal_duration(chip, TPM_ORD_CONTINUE_SELFTEST);
851 TPM_ORD_CONTINUE_SELFTEST);
852 852
853 loops = jiffies_to_msecs(duration) / delay_msec; 853 loops = jiffies_to_msecs(duration) / delay_msec;
854 854
@@ -965,12 +965,12 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
965 if (err) 965 if (err)
966 goto out; 966 goto out;
967 967
968 /* 968 /*
969 ignore header 10 bytes 969 ignore header 10 bytes
970 algorithm 32 bits (1 == RSA ) 970 algorithm 32 bits (1 == RSA )
971 encscheme 16 bits 971 encscheme 16 bits
972 sigscheme 16 bits 972 sigscheme 16 bits
973 parameters (RSA 12->bytes: keybit, #primes, expbit) 973 parameters (RSA 12->bytes: keybit, #primes, expbit)
974 keylenbytes 32 bits 974 keylenbytes 32 bits
975 256 byte modulus 975 256 byte modulus
976 ignore checksum 20 bytes 976 ignore checksum 20 bytes
@@ -1020,43 +1020,33 @@ ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr,
1020 str += sprintf(str, "Manufacturer: 0x%x\n", 1020 str += sprintf(str, "Manufacturer: 0x%x\n",
1021 be32_to_cpu(cap.manufacturer_id)); 1021 be32_to_cpu(cap.manufacturer_id));
1022 1022
1023 rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap, 1023 /* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */
1024 "attempting to determine the 1.1 version");
1025 if (rc)
1026 return 0;
1027 str += sprintf(str,
1028 "TCG version: %d.%d\nFirmware version: %d.%d\n",
1029 cap.tpm_version.Major, cap.tpm_version.Minor,
1030 cap.tpm_version.revMajor, cap.tpm_version.revMinor);
1031 return str - buf;
1032}
1033EXPORT_SYMBOL_GPL(tpm_show_caps);
1034
1035ssize_t tpm_show_caps_1_2(struct device * dev,
1036 struct device_attribute * attr, char *buf)
1037{
1038 cap_t cap;
1039 ssize_t rc;
1040 char *str = buf;
1041
1042 rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
1043 "attempting to determine the manufacturer");
1044 if (rc)
1045 return 0;
1046 str += sprintf(str, "Manufacturer: 0x%x\n",
1047 be32_to_cpu(cap.manufacturer_id));
1048 rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap, 1024 rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap,
1049 "attempting to determine the 1.2 version"); 1025 "attempting to determine the 1.2 version");
1050 if (rc) 1026 if (!rc) {
1051 return 0; 1027 str += sprintf(str,
1052 str += sprintf(str, 1028 "TCG version: %d.%d\nFirmware version: %d.%d\n",
1053 "TCG version: %d.%d\nFirmware version: %d.%d\n", 1029 cap.tpm_version_1_2.Major,
1054 cap.tpm_version_1_2.Major, cap.tpm_version_1_2.Minor, 1030 cap.tpm_version_1_2.Minor,
1055 cap.tpm_version_1_2.revMajor, 1031 cap.tpm_version_1_2.revMajor,
1056 cap.tpm_version_1_2.revMinor); 1032 cap.tpm_version_1_2.revMinor);
1033 } else {
1034 /* Otherwise just use TPM_STRUCT_VER */
1035 rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap,
1036 "attempting to determine the 1.1 version");
1037 if (rc)
1038 return 0;
1039 str += sprintf(str,
1040 "TCG version: %d.%d\nFirmware version: %d.%d\n",
1041 cap.tpm_version.Major,
1042 cap.tpm_version.Minor,
1043 cap.tpm_version.revMajor,
1044 cap.tpm_version.revMinor);
1045 }
1046
1057 return str - buf; 1047 return str - buf;
1058} 1048}
1059EXPORT_SYMBOL_GPL(tpm_show_caps_1_2); 1049EXPORT_SYMBOL_GPL(tpm_show_caps);
1060 1050
1061ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr, 1051ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr,
1062 char *buf) 1052 char *buf)
@@ -1102,8 +1092,8 @@ ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
1102} 1092}
1103EXPORT_SYMBOL_GPL(tpm_store_cancel); 1093EXPORT_SYMBOL_GPL(tpm_store_cancel);
1104 1094
1105static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask, bool check_cancel, 1095static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
1106 bool *canceled) 1096 bool check_cancel, bool *canceled)
1107{ 1097{
1108 u8 status = chip->vendor.status(chip); 1098 u8 status = chip->vendor.status(chip);
1109 1099
@@ -1170,38 +1160,25 @@ EXPORT_SYMBOL_GPL(wait_for_tpm_stat);
1170 */ 1160 */
1171int tpm_open(struct inode *inode, struct file *file) 1161int tpm_open(struct inode *inode, struct file *file)
1172{ 1162{
1173 int minor = iminor(inode); 1163 struct miscdevice *misc = file->private_data;
1174 struct tpm_chip *chip = NULL, *pos; 1164 struct tpm_chip *chip = container_of(misc, struct tpm_chip,
1175 1165 vendor.miscdev);
1176 rcu_read_lock();
1177 list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
1178 if (pos->vendor.miscdev.minor == minor) {
1179 chip = pos;
1180 get_device(chip->dev);
1181 break;
1182 }
1183 }
1184 rcu_read_unlock();
1185
1186 if (!chip)
1187 return -ENODEV;
1188 1166
1189 if (test_and_set_bit(0, &chip->is_open)) { 1167 if (test_and_set_bit(0, &chip->is_open)) {
1190 dev_dbg(chip->dev, "Another process owns this TPM\n"); 1168 dev_dbg(chip->dev, "Another process owns this TPM\n");
1191 put_device(chip->dev);
1192 return -EBUSY; 1169 return -EBUSY;
1193 } 1170 }
1194 1171
1195 chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL); 1172 chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL);
1196 if (chip->data_buffer == NULL) { 1173 if (chip->data_buffer == NULL) {
1197 clear_bit(0, &chip->is_open); 1174 clear_bit(0, &chip->is_open);
1198 put_device(chip->dev);
1199 return -ENOMEM; 1175 return -ENOMEM;
1200 } 1176 }
1201 1177
1202 atomic_set(&chip->data_pending, 0); 1178 atomic_set(&chip->data_pending, 0);
1203 1179
1204 file->private_data = chip; 1180 file->private_data = chip;
1181 get_device(chip->dev);
1205 return 0; 1182 return 0;
1206} 1183}
1207EXPORT_SYMBOL_GPL(tpm_open); 1184EXPORT_SYMBOL_GPL(tpm_open);
@@ -1463,7 +1440,6 @@ void tpm_dev_vendor_release(struct tpm_chip *chip)
1463 chip->vendor.release(chip->dev); 1440 chip->vendor.release(chip->dev);
1464 1441
1465 clear_bit(chip->dev_num, dev_mask); 1442 clear_bit(chip->dev_num, dev_mask);
1466 kfree(chip->vendor.miscdev.name);
1467} 1443}
1468EXPORT_SYMBOL_GPL(tpm_dev_vendor_release); 1444EXPORT_SYMBOL_GPL(tpm_dev_vendor_release);
1469 1445
@@ -1487,7 +1463,7 @@ void tpm_dev_release(struct device *dev)
1487EXPORT_SYMBOL_GPL(tpm_dev_release); 1463EXPORT_SYMBOL_GPL(tpm_dev_release);
1488 1464
1489/* 1465/*
1490 * Called from tpm_<specific>.c probe function only for devices 1466 * Called from tpm_<specific>.c probe function only for devices
1491 * the driver has determined it should claim. Prior to calling 1467 * the driver has determined it should claim. Prior to calling
1492 * this function the specific probe function has called pci_enable_device 1468 * this function the specific probe function has called pci_enable_device
1493 * upon errant exit from this function specific probe function should call 1469 * upon errant exit from this function specific probe function should call
@@ -1496,17 +1472,13 @@ EXPORT_SYMBOL_GPL(tpm_dev_release);
1496struct tpm_chip *tpm_register_hardware(struct device *dev, 1472struct tpm_chip *tpm_register_hardware(struct device *dev,
1497 const struct tpm_vendor_specific *entry) 1473 const struct tpm_vendor_specific *entry)
1498{ 1474{
1499#define DEVNAME_SIZE 7
1500
1501 char *devname;
1502 struct tpm_chip *chip; 1475 struct tpm_chip *chip;
1503 1476
1504 /* Driver specific per-device data */ 1477 /* Driver specific per-device data */
1505 chip = kzalloc(sizeof(*chip), GFP_KERNEL); 1478 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
1506 devname = kmalloc(DEVNAME_SIZE, GFP_KERNEL);
1507 1479
1508 if (chip == NULL || devname == NULL) 1480 if (chip == NULL)
1509 goto out_free; 1481 return NULL;
1510 1482
1511 mutex_init(&chip->buffer_mutex); 1483 mutex_init(&chip->buffer_mutex);
1512 mutex_init(&chip->tpm_mutex); 1484 mutex_init(&chip->tpm_mutex);
@@ -1531,8 +1503,9 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
1531 1503
1532 set_bit(chip->dev_num, dev_mask); 1504 set_bit(chip->dev_num, dev_mask);
1533 1505
1534 scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num); 1506 scnprintf(chip->devname, sizeof(chip->devname), "%s%d", "tpm",
1535 chip->vendor.miscdev.name = devname; 1507 chip->dev_num);
1508 chip->vendor.miscdev.name = chip->devname;
1536 1509
1537 chip->vendor.miscdev.parent = dev; 1510 chip->vendor.miscdev.parent = dev;
1538 chip->dev = get_device(dev); 1511 chip->dev = get_device(dev);
@@ -1558,7 +1531,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
1558 goto put_device; 1531 goto put_device;
1559 } 1532 }
1560 1533
1561 chip->bios_dir = tpm_bios_log_setup(devname); 1534 chip->bios_dir = tpm_bios_log_setup(chip->devname);
1562 1535
1563 /* Make chip available */ 1536 /* Make chip available */
1564 spin_lock(&driver_lock); 1537 spin_lock(&driver_lock);
@@ -1571,7 +1544,6 @@ put_device:
1571 put_device(chip->dev); 1544 put_device(chip->dev);
1572out_free: 1545out_free:
1573 kfree(chip); 1546 kfree(chip);
1574 kfree(devname);
1575 return NULL; 1547 return NULL;
1576} 1548}
1577EXPORT_SYMBOL_GPL(tpm_register_hardware); 1549EXPORT_SYMBOL_GPL(tpm_register_hardware);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index a7bfc176ed43..f32847872193 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -59,8 +59,6 @@ extern ssize_t tpm_show_pcrs(struct device *, struct device_attribute *attr,
59 char *); 59 char *);
60extern ssize_t tpm_show_caps(struct device *, struct device_attribute *attr, 60extern ssize_t tpm_show_caps(struct device *, struct device_attribute *attr,
61 char *); 61 char *);
62extern ssize_t tpm_show_caps_1_2(struct device *, struct device_attribute *attr,
63 char *);
64extern ssize_t tpm_store_cancel(struct device *, struct device_attribute *attr, 62extern ssize_t tpm_store_cancel(struct device *, struct device_attribute *attr,
65 const char *, size_t); 63 const char *, size_t);
66extern ssize_t tpm_show_enabled(struct device *, struct device_attribute *attr, 64extern ssize_t tpm_show_enabled(struct device *, struct device_attribute *attr,
@@ -122,6 +120,7 @@ struct tpm_chip {
122 struct device *dev; /* Device stuff */ 120 struct device *dev; /* Device stuff */
123 121
124 int dev_num; /* /dev/tpm# */ 122 int dev_num; /* /dev/tpm# */
123 char devname[7];
125 unsigned long is_open; /* only one allowed */ 124 unsigned long is_open; /* only one allowed */
126 int time_expired; 125 int time_expired;
127 126
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index 99d6820c611d..c9a528d25d22 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -202,7 +202,7 @@ static int __init init_atmel(void)
202 202
203 have_region = 203 have_region =
204 (atmel_request_region 204 (atmel_request_region
205 (tpm_atmel.base, region_size, "tpm_atmel0") == NULL) ? 0 : 1; 205 (base, region_size, "tpm_atmel0") == NULL) ? 0 : 1;
206 206
207 pdev = platform_device_register_simple("tpm_atmel", -1, NULL, 0); 207 pdev = platform_device_register_simple("tpm_atmel", -1, NULL, 0);
208 if (IS_ERR(pdev)) { 208 if (IS_ERR(pdev)) {
diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
index 84ddc557b8f8..59f7cb28260b 100644
--- a/drivers/char/tpm/tpm_eventlog.c
+++ b/drivers/char/tpm/tpm_eventlog.c
@@ -406,7 +406,6 @@ out_tpm:
406out: 406out:
407 return NULL; 407 return NULL;
408} 408}
409EXPORT_SYMBOL_GPL(tpm_bios_log_setup);
410 409
411void tpm_bios_log_teardown(struct dentry **lst) 410void tpm_bios_log_teardown(struct dentry **lst)
412{ 411{
@@ -415,5 +414,3 @@ void tpm_bios_log_teardown(struct dentry **lst)
415 for (i = 0; i < 3; i++) 414 for (i = 0; i < 3; i++)
416 securityfs_remove(lst[i]); 415 securityfs_remove(lst[i]);
417} 416}
418EXPORT_SYMBOL_GPL(tpm_bios_log_teardown);
419MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
new file mode 100644
index 000000000000..c3cd7fe481a1
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -0,0 +1,284 @@
1/*
2 * ATMEL I2C TPM AT97SC3204T
3 *
4 * Copyright (C) 2012 V Lab Technologies
5 * Teddy Reed <teddy@prosauce.org>
6 * Copyright (C) 2013, Obsidian Research Corp.
7 * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
8 * Device driver for ATMEL I2C TPMs.
9 *
10 * Teddy Reed determined the basic I2C command flow, unlike other I2C TPM
11 * devices the raw TCG formatted TPM command data is written via I2C and then
12 * raw TCG formatted TPM command data is returned via I2C.
13 *
14 * TGC status/locality/etc functions seen in the LPC implementation do not
15 * seem to be present.
16 *
17 * This program is free software: you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation, either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program. If not, see http://www.gnu.org/licenses/>.
29 */
30#include <linux/init.h>
31#include <linux/module.h>
32#include <linux/moduleparam.h>
33#include <linux/slab.h>
34#include <linux/i2c.h>
35#include "tpm.h"
36
37#define I2C_DRIVER_NAME "tpm_i2c_atmel"
38
39#define TPM_I2C_SHORT_TIMEOUT 750 /* ms */
40#define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */
41
42#define ATMEL_STS_OK 1
43
44struct priv_data {
45 size_t len;
46 /* This is the amount we read on the first try. 25 was chosen to fit a
47 * fair number of read responses in the buffer so a 2nd retry can be
48 * avoided in small message cases. */
49 u8 buffer[sizeof(struct tpm_output_header) + 25];
50};
51
52static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
53{
54 struct priv_data *priv = chip->vendor.priv;
55 struct i2c_client *client = to_i2c_client(chip->dev);
56 s32 status;
57
58 priv->len = 0;
59
60 if (len <= 2)
61 return -EIO;
62
63 status = i2c_master_send(client, buf, len);
64
65 dev_dbg(chip->dev,
66 "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
67 (int)min_t(size_t, 64, len), buf, len, status);
68 return status;
69}
70
71static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
72{
73 struct priv_data *priv = chip->vendor.priv;
74 struct i2c_client *client = to_i2c_client(chip->dev);
75 struct tpm_output_header *hdr =
76 (struct tpm_output_header *)priv->buffer;
77 u32 expected_len;
78 int rc;
79
80 if (priv->len == 0)
81 return -EIO;
82
83 /* Get the message size from the message header, if we didn't get the
84 * whole message in read_status then we need to re-read the
85 * message. */
86 expected_len = be32_to_cpu(hdr->length);
87 if (expected_len > count)
88 return -ENOMEM;
89
90 if (priv->len >= expected_len) {
91 dev_dbg(chip->dev,
92 "%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
93 (int)min_t(size_t, 64, expected_len), buf, count,
94 expected_len);
95 memcpy(buf, priv->buffer, expected_len);
96 return expected_len;
97 }
98
99 rc = i2c_master_recv(client, buf, expected_len);
100 dev_dbg(chip->dev,
101 "%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
102 (int)min_t(size_t, 64, expected_len), buf, count,
103 expected_len);
104 return rc;
105}
106
107static void i2c_atmel_cancel(struct tpm_chip *chip)
108{
109 dev_err(chip->dev, "TPM operation cancellation was requested, but is not supported");
110}
111
112static u8 i2c_atmel_read_status(struct tpm_chip *chip)
113{
114 struct priv_data *priv = chip->vendor.priv;
115 struct i2c_client *client = to_i2c_client(chip->dev);
116 int rc;
117
118 /* The TPM fails the I2C read until it is ready, so we do the entire
119 * transfer here and buffer it locally. This way the common code can
120 * properly handle the timeouts. */
121 priv->len = 0;
122 memset(priv->buffer, 0, sizeof(priv->buffer));
123
124
125 /* Once the TPM has completed the command the command remains readable
126 * until another command is issued. */
127 rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer));
128 dev_dbg(chip->dev,
129 "%s: sts=%d", __func__, rc);
130 if (rc <= 0)
131 return 0;
132
133 priv->len = rc;
134
135 return ATMEL_STS_OK;
136}
137
138static const struct file_operations i2c_atmel_ops = {
139 .owner = THIS_MODULE,
140 .llseek = no_llseek,
141 .open = tpm_open,
142 .read = tpm_read,
143 .write = tpm_write,
144 .release = tpm_release,
145};
146
147static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
148static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
149static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
150static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
151static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
152static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
153static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
154static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
155static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
156static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
157
158static struct attribute *i2c_atmel_attrs[] = {
159 &dev_attr_pubek.attr,
160 &dev_attr_pcrs.attr,
161 &dev_attr_enabled.attr,
162 &dev_attr_active.attr,
163 &dev_attr_owned.attr,
164 &dev_attr_temp_deactivated.attr,
165 &dev_attr_caps.attr,
166 &dev_attr_cancel.attr,
167 &dev_attr_durations.attr,
168 &dev_attr_timeouts.attr,
169 NULL,
170};
171
172static struct attribute_group i2c_atmel_attr_grp = {
173 .attrs = i2c_atmel_attrs
174};
175
176static bool i2c_atmel_req_canceled(struct tpm_chip *chip, u8 status)
177{
178 return 0;
179}
180
181static const struct tpm_vendor_specific i2c_atmel = {
182 .status = i2c_atmel_read_status,
183 .recv = i2c_atmel_recv,
184 .send = i2c_atmel_send,
185 .cancel = i2c_atmel_cancel,
186 .req_complete_mask = ATMEL_STS_OK,
187 .req_complete_val = ATMEL_STS_OK,
188 .req_canceled = i2c_atmel_req_canceled,
189 .attr_group = &i2c_atmel_attr_grp,
190 .miscdev.fops = &i2c_atmel_ops,
191};
192
193static int i2c_atmel_probe(struct i2c_client *client,
194 const struct i2c_device_id *id)
195{
196 int rc;
197 struct tpm_chip *chip;
198 struct device *dev = &client->dev;
199
200 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
201 return -ENODEV;
202
203 chip = tpm_register_hardware(dev, &i2c_atmel);
204 if (!chip) {
205 dev_err(dev, "%s() error in tpm_register_hardware\n", __func__);
206 return -ENODEV;
207 }
208
209 chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data),
210 GFP_KERNEL);
211
212 /* Default timeouts */
213 chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
214 chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT);
215 chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
216 chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
217 chip->vendor.irq = 0;
218
219 /* There is no known way to probe for this device, and all version
220 * information seems to be read via TPM commands. Thus we rely on the
221 * TPM startup process in the common code to detect the device. */
222 if (tpm_get_timeouts(chip)) {
223 rc = -ENODEV;
224 goto out_err;
225 }
226
227 if (tpm_do_selftest(chip)) {
228 rc = -ENODEV;
229 goto out_err;
230 }
231
232 return 0;
233
234out_err:
235 tpm_dev_vendor_release(chip);
236 tpm_remove_hardware(chip->dev);
237 return rc;
238}
239
240static int i2c_atmel_remove(struct i2c_client *client)
241{
242 struct device *dev = &(client->dev);
243 struct tpm_chip *chip = dev_get_drvdata(dev);
244
245 if (chip)
246 tpm_dev_vendor_release(chip);
247 tpm_remove_hardware(dev);
248 kfree(chip);
249 return 0;
250}
251
252static const struct i2c_device_id i2c_atmel_id[] = {
253 {I2C_DRIVER_NAME, 0},
254 {}
255};
256MODULE_DEVICE_TABLE(i2c, i2c_atmel_id);
257
258#ifdef CONFIG_OF
259static const struct of_device_id i2c_atmel_of_match[] = {
260 {.compatible = "atmel,at97sc3204t"},
261 {},
262};
263MODULE_DEVICE_TABLE(of, i2c_atmel_of_match);
264#endif
265
266static SIMPLE_DEV_PM_OPS(i2c_atmel_pm_ops, tpm_pm_suspend, tpm_pm_resume);
267
268static struct i2c_driver i2c_atmel_driver = {
269 .id_table = i2c_atmel_id,
270 .probe = i2c_atmel_probe,
271 .remove = i2c_atmel_remove,
272 .driver = {
273 .name = I2C_DRIVER_NAME,
274 .owner = THIS_MODULE,
275 .pm = &i2c_atmel_pm_ops,
276 .of_match_table = of_match_ptr(i2c_atmel_of_match),
277 },
278};
279
280module_i2c_driver(i2c_atmel_driver);
281
282MODULE_AUTHOR("Jason Gunthorpe <jgunthorpe@obsidianresearch.com>");
283MODULE_DESCRIPTION("Atmel TPM I2C Driver");
284MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index b8735de8ce95..fefd2aa5c81e 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -581,7 +581,7 @@ static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
581static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); 581static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
582static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); 582static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
583static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); 583static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
584static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); 584static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
585static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); 585static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
586static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); 586static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
587static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); 587static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
@@ -685,7 +685,6 @@ out_vendor:
685 chip->dev->release = NULL; 685 chip->dev->release = NULL;
686 chip->release = NULL; 686 chip->release = NULL;
687 tpm_dev.client = NULL; 687 tpm_dev.client = NULL;
688 dev_set_drvdata(chip->dev, chip);
689out_err: 688out_err:
690 return rc; 689 return rc;
691} 690}
@@ -766,7 +765,6 @@ static int tpm_tis_i2c_remove(struct i2c_client *client)
766 chip->dev->release = NULL; 765 chip->dev->release = NULL;
767 chip->release = NULL; 766 chip->release = NULL;
768 tpm_dev.client = NULL; 767 tpm_dev.client = NULL;
769 dev_set_drvdata(chip->dev, chip);
770 768
771 return 0; 769 return 0;
772} 770}
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
new file mode 100644
index 000000000000..6276fea01ff0
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -0,0 +1,710 @@
1/******************************************************************************
2 * Nuvoton TPM I2C Device Driver Interface for WPCT301/NPCT501,
3 * based on the TCG TPM Interface Spec version 1.2.
4 * Specifications at www.trustedcomputinggroup.org
5 *
6 * Copyright (C) 2011, Nuvoton Technology Corporation.
7 * Dan Morav <dan.morav@nuvoton.com>
8 * Copyright (C) 2013, Obsidian Research Corp.
9 * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
10 *
11 * This program is free software: you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see http://www.gnu.org/licenses/>.
23 *
24 * Nuvoton contact information: APC.Support@nuvoton.com
25 *****************************************************************************/
26
27#include <linux/init.h>
28#include <linux/module.h>
29#include <linux/moduleparam.h>
30#include <linux/slab.h>
31#include <linux/interrupt.h>
32#include <linux/wait.h>
33#include <linux/i2c.h>
34#include "tpm.h"
35
36/* I2C interface offsets */
37#define TPM_STS 0x00
38#define TPM_BURST_COUNT 0x01
39#define TPM_DATA_FIFO_W 0x20
40#define TPM_DATA_FIFO_R 0x40
41#define TPM_VID_DID_RID 0x60
42/* TPM command header size */
43#define TPM_HEADER_SIZE 10
44#define TPM_RETRY 5
45/*
46 * I2C bus device maximum buffer size w/o counting I2C address or command
47 * i.e. max size required for I2C write is 34 = addr, command, 32 bytes data
48 */
49#define TPM_I2C_MAX_BUF_SIZE 32
50#define TPM_I2C_RETRY_COUNT 32
51#define TPM_I2C_BUS_DELAY 1 /* msec */
52#define TPM_I2C_RETRY_DELAY_SHORT 2 /* msec */
53#define TPM_I2C_RETRY_DELAY_LONG 10 /* msec */
54
55#define I2C_DRIVER_NAME "tpm_i2c_nuvoton"
56
57struct priv_data {
58 unsigned int intrs;
59};
60
61static s32 i2c_nuvoton_read_buf(struct i2c_client *client, u8 offset, u8 size,
62 u8 *data)
63{
64 s32 status;
65
66 status = i2c_smbus_read_i2c_block_data(client, offset, size, data);
67 dev_dbg(&client->dev,
68 "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__,
69 offset, size, (int)size, data, status);
70 return status;
71}
72
73static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size,
74 u8 *data)
75{
76 s32 status;
77
78 status = i2c_smbus_write_i2c_block_data(client, offset, size, data);
79 dev_dbg(&client->dev,
80 "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__,
81 offset, size, (int)size, data, status);
82 return status;
83}
84
85#define TPM_STS_VALID 0x80
86#define TPM_STS_COMMAND_READY 0x40
87#define TPM_STS_GO 0x20
88#define TPM_STS_DATA_AVAIL 0x10
89#define TPM_STS_EXPECT 0x08
90#define TPM_STS_RESPONSE_RETRY 0x02
91#define TPM_STS_ERR_VAL 0x07 /* bit2...bit0 reads always 0 */
92
93#define TPM_I2C_SHORT_TIMEOUT 750 /* ms */
94#define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */
95
96/* read TPM_STS register */
97static u8 i2c_nuvoton_read_status(struct tpm_chip *chip)
98{
99 struct i2c_client *client = to_i2c_client(chip->dev);
100 s32 status;
101 u8 data;
102
103 status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data);
104 if (status <= 0) {
105 dev_err(chip->dev, "%s() error return %d\n", __func__,
106 status);
107 data = TPM_STS_ERR_VAL;
108 }
109
110 return data;
111}
112
113/* write byte to TPM_STS register */
114static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data)
115{
116 s32 status;
117 int i;
118
119 /* this causes the current command to be aborted */
120 for (i = 0, status = -1; i < TPM_I2C_RETRY_COUNT && status < 0; i++) {
121 status = i2c_nuvoton_write_buf(client, TPM_STS, 1, &data);
122 msleep(TPM_I2C_BUS_DELAY);
123 }
124 return status;
125}
126
127/* write commandReady to TPM_STS register */
128static void i2c_nuvoton_ready(struct tpm_chip *chip)
129{
130 struct i2c_client *client = to_i2c_client(chip->dev);
131 s32 status;
132
133 /* this causes the current command to be aborted */
134 status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY);
135 if (status < 0)
136 dev_err(chip->dev,
137 "%s() fail to write TPM_STS.commandReady\n", __func__);
138}
139
140/* read burstCount field from TPM_STS register
141 * return -1 on fail to read */
142static int i2c_nuvoton_get_burstcount(struct i2c_client *client,
143 struct tpm_chip *chip)
144{
145 unsigned long stop = jiffies + chip->vendor.timeout_d;
146 s32 status;
147 int burst_count = -1;
148 u8 data;
149
150 /* wait for burstcount to be non-zero */
151 do {
152 /* in I2C burstCount is 1 byte */
153 status = i2c_nuvoton_read_buf(client, TPM_BURST_COUNT, 1,
154 &data);
155 if (status > 0 && data > 0) {
156 burst_count = min_t(u8, TPM_I2C_MAX_BUF_SIZE, data);
157 break;
158 }
159 msleep(TPM_I2C_BUS_DELAY);
160 } while (time_before(jiffies, stop));
161
162 return burst_count;
163}
164
165/*
166 * WPCT301/NPCT501 SINT# supports only dataAvail
167 * any call to this function which is not waiting for dataAvail will
168 * set queue to NULL to avoid waiting for interrupt
169 */
170static bool i2c_nuvoton_check_status(struct tpm_chip *chip, u8 mask, u8 value)
171{
172 u8 status = i2c_nuvoton_read_status(chip);
173 return (status != TPM_STS_ERR_VAL) && ((status & mask) == value);
174}
175
176static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value,
177 u32 timeout, wait_queue_head_t *queue)
178{
179 if (chip->vendor.irq && queue) {
180 s32 rc;
181 DEFINE_WAIT(wait);
182 struct priv_data *priv = chip->vendor.priv;
183 unsigned int cur_intrs = priv->intrs;
184
185 enable_irq(chip->vendor.irq);
186 rc = wait_event_interruptible_timeout(*queue,
187 cur_intrs != priv->intrs,
188 timeout);
189 if (rc > 0)
190 return 0;
191 /* At this point we know that the SINT pin is asserted, so we
192 * do not need to do i2c_nuvoton_check_status */
193 } else {
194 unsigned long ten_msec, stop;
195 bool status_valid;
196
197 /* check current status */
198 status_valid = i2c_nuvoton_check_status(chip, mask, value);
199 if (status_valid)
200 return 0;
201
202 /* use polling to wait for the event */
203 ten_msec = jiffies + msecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG);
204 stop = jiffies + timeout;
205 do {
206 if (time_before(jiffies, ten_msec))
207 msleep(TPM_I2C_RETRY_DELAY_SHORT);
208 else
209 msleep(TPM_I2C_RETRY_DELAY_LONG);
210 status_valid = i2c_nuvoton_check_status(chip, mask,
211 value);
212 if (status_valid)
213 return 0;
214 } while (time_before(jiffies, stop));
215 }
216 dev_err(chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask,
217 value);
218 return -ETIMEDOUT;
219}
220
221/* wait for dataAvail field to be set in the TPM_STS register */
222static int i2c_nuvoton_wait_for_data_avail(struct tpm_chip *chip, u32 timeout,
223 wait_queue_head_t *queue)
224{
225 return i2c_nuvoton_wait_for_stat(chip,
226 TPM_STS_DATA_AVAIL | TPM_STS_VALID,
227 TPM_STS_DATA_AVAIL | TPM_STS_VALID,
228 timeout, queue);
229}
230
231/* Read @count bytes into @buf from TPM_RD_FIFO register */
232static int i2c_nuvoton_recv_data(struct i2c_client *client,
233 struct tpm_chip *chip, u8 *buf, size_t count)
234{
235 s32 rc;
236 int burst_count, bytes2read, size = 0;
237
238 while (size < count &&
239 i2c_nuvoton_wait_for_data_avail(chip,
240 chip->vendor.timeout_c,
241 &chip->vendor.read_queue) == 0) {
242 burst_count = i2c_nuvoton_get_burstcount(client, chip);
243 if (burst_count < 0) {
244 dev_err(chip->dev,
245 "%s() fail to read burstCount=%d\n", __func__,
246 burst_count);
247 return -EIO;
248 }
249 bytes2read = min_t(size_t, burst_count, count - size);
250 rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R,
251 bytes2read, &buf[size]);
252 if (rc < 0) {
253 dev_err(chip->dev,
254 "%s() fail on i2c_nuvoton_read_buf()=%d\n",
255 __func__, rc);
256 return -EIO;
257 }
258 dev_dbg(chip->dev, "%s(%d):", __func__, bytes2read);
259 size += bytes2read;
260 }
261
262 return size;
263}
264
265/* Read TPM command results */
266static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
267{
268 struct device *dev = chip->dev;
269 struct i2c_client *client = to_i2c_client(dev);
270 s32 rc;
271 int expected, status, burst_count, retries, size = 0;
272
273 if (count < TPM_HEADER_SIZE) {
274 i2c_nuvoton_ready(chip); /* return to idle */
275 dev_err(dev, "%s() count < header size\n", __func__);
276 return -EIO;
277 }
278 for (retries = 0; retries < TPM_RETRY; retries++) {
279 if (retries > 0) {
280 /* if this is not the first trial, set responseRetry */
281 i2c_nuvoton_write_status(client,
282 TPM_STS_RESPONSE_RETRY);
283 }
284 /*
285 * read first available (> 10 bytes), including:
286 * tag, paramsize, and result
287 */
288 status = i2c_nuvoton_wait_for_data_avail(
289 chip, chip->vendor.timeout_c, &chip->vendor.read_queue);
290 if (status != 0) {
291 dev_err(dev, "%s() timeout on dataAvail\n", __func__);
292 size = -ETIMEDOUT;
293 continue;
294 }
295 burst_count = i2c_nuvoton_get_burstcount(client, chip);
296 if (burst_count < 0) {
297 dev_err(dev, "%s() fail to get burstCount\n", __func__);
298 size = -EIO;
299 continue;
300 }
301 size = i2c_nuvoton_recv_data(client, chip, buf,
302 burst_count);
303 if (size < TPM_HEADER_SIZE) {
304 dev_err(dev, "%s() fail to read header\n", __func__);
305 size = -EIO;
306 continue;
307 }
308 /*
309 * convert number of expected bytes field from big endian 32 bit
310 * to machine native
311 */
312 expected = be32_to_cpu(*(__be32 *) (buf + 2));
313 if (expected > count) {
314 dev_err(dev, "%s() expected > count\n", __func__);
315 size = -EIO;
316 continue;
317 }
318 rc = i2c_nuvoton_recv_data(client, chip, &buf[size],
319 expected - size);
320 size += rc;
321 if (rc < 0 || size < expected) {
322 dev_err(dev, "%s() fail to read remainder of result\n",
323 __func__);
324 size = -EIO;
325 continue;
326 }
327 if (i2c_nuvoton_wait_for_stat(
328 chip, TPM_STS_VALID | TPM_STS_DATA_AVAIL,
329 TPM_STS_VALID, chip->vendor.timeout_c,
330 NULL)) {
331 dev_err(dev, "%s() error left over data\n", __func__);
332 size = -ETIMEDOUT;
333 continue;
334 }
335 break;
336 }
337 i2c_nuvoton_ready(chip);
338 dev_dbg(chip->dev, "%s() -> %d\n", __func__, size);
339 return size;
340}
341
342/*
343 * Send TPM command.
344 *
345 * If interrupts are used (signaled by an irq set in the vendor structure)
346 * tpm.c can skip polling for the data to be available as the interrupt is
347 * waited for here
348 */
349static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
350{
351 struct device *dev = chip->dev;
352 struct i2c_client *client = to_i2c_client(dev);
353 u32 ordinal;
354 size_t count = 0;
355 int burst_count, bytes2write, retries, rc = -EIO;
356
357 for (retries = 0; retries < TPM_RETRY; retries++) {
358 i2c_nuvoton_ready(chip);
359 if (i2c_nuvoton_wait_for_stat(chip, TPM_STS_COMMAND_READY,
360 TPM_STS_COMMAND_READY,
361 chip->vendor.timeout_b, NULL)) {
362 dev_err(dev, "%s() timeout on commandReady\n",
363 __func__);
364 rc = -EIO;
365 continue;
366 }
367 rc = 0;
368 while (count < len - 1) {
369 burst_count = i2c_nuvoton_get_burstcount(client,
370 chip);
371 if (burst_count < 0) {
372 dev_err(dev, "%s() fail get burstCount\n",
373 __func__);
374 rc = -EIO;
375 break;
376 }
377 bytes2write = min_t(size_t, burst_count,
378 len - 1 - count);
379 rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W,
380 bytes2write, &buf[count]);
381 if (rc < 0) {
382 dev_err(dev, "%s() fail i2cWriteBuf\n",
383 __func__);
384 break;
385 }
386 dev_dbg(dev, "%s(%d):", __func__, bytes2write);
387 count += bytes2write;
388 rc = i2c_nuvoton_wait_for_stat(chip,
389 TPM_STS_VALID |
390 TPM_STS_EXPECT,
391 TPM_STS_VALID |
392 TPM_STS_EXPECT,
393 chip->vendor.timeout_c,
394 NULL);
395 if (rc < 0) {
396 dev_err(dev, "%s() timeout on Expect\n",
397 __func__);
398 rc = -ETIMEDOUT;
399 break;
400 }
401 }
402 if (rc < 0)
403 continue;
404
405 /* write last byte */
406 rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W, 1,
407 &buf[count]);
408 if (rc < 0) {
409 dev_err(dev, "%s() fail to write last byte\n",
410 __func__);
411 rc = -EIO;
412 continue;
413 }
414 dev_dbg(dev, "%s(last): %02x", __func__, buf[count]);
415 rc = i2c_nuvoton_wait_for_stat(chip,
416 TPM_STS_VALID | TPM_STS_EXPECT,
417 TPM_STS_VALID,
418 chip->vendor.timeout_c, NULL);
419 if (rc) {
420 dev_err(dev, "%s() timeout on Expect to clear\n",
421 __func__);
422 rc = -ETIMEDOUT;
423 continue;
424 }
425 break;
426 }
427 if (rc < 0) {
428 /* retries == TPM_RETRY */
429 i2c_nuvoton_ready(chip);
430 return rc;
431 }
432 /* execute the TPM command */
433 rc = i2c_nuvoton_write_status(client, TPM_STS_GO);
434 if (rc < 0) {
435 dev_err(dev, "%s() fail to write Go\n", __func__);
436 i2c_nuvoton_ready(chip);
437 return rc;
438 }
439 ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
440 rc = i2c_nuvoton_wait_for_data_avail(chip,
441 tpm_calc_ordinal_duration(chip,
442 ordinal),
443 &chip->vendor.read_queue);
444 if (rc) {
445 dev_err(dev, "%s() timeout command duration\n", __func__);
446 i2c_nuvoton_ready(chip);
447 return rc;
448 }
449
450 dev_dbg(dev, "%s() -> %zd\n", __func__, len);
451 return len;
452}
453
454static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status)
455{
456 return (status == TPM_STS_COMMAND_READY);
457}
458
459static const struct file_operations i2c_nuvoton_ops = {
460 .owner = THIS_MODULE,
461 .llseek = no_llseek,
462 .open = tpm_open,
463 .read = tpm_read,
464 .write = tpm_write,
465 .release = tpm_release,
466};
467
468static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
469static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
470static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
471static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
472static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
473static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
474static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
475static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
476static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
477static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
478
479static struct attribute *i2c_nuvoton_attrs[] = {
480 &dev_attr_pubek.attr,
481 &dev_attr_pcrs.attr,
482 &dev_attr_enabled.attr,
483 &dev_attr_active.attr,
484 &dev_attr_owned.attr,
485 &dev_attr_temp_deactivated.attr,
486 &dev_attr_caps.attr,
487 &dev_attr_cancel.attr,
488 &dev_attr_durations.attr,
489 &dev_attr_timeouts.attr,
490 NULL,
491};
492
493static struct attribute_group i2c_nuvoton_attr_grp = {
494 .attrs = i2c_nuvoton_attrs
495};
496
497static const struct tpm_vendor_specific tpm_i2c = {
498 .status = i2c_nuvoton_read_status,
499 .recv = i2c_nuvoton_recv,
500 .send = i2c_nuvoton_send,
501 .cancel = i2c_nuvoton_ready,
502 .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
503 .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
504 .req_canceled = i2c_nuvoton_req_canceled,
505 .attr_group = &i2c_nuvoton_attr_grp,
506 .miscdev.fops = &i2c_nuvoton_ops,
507};
508
509/* The only purpose for the handler is to signal to any waiting threads that
510 * the interrupt is currently being asserted. The driver does not do any
511 * processing triggered by interrupts, and the chip provides no way to mask at
512 * the source (plus that would be slow over I2C). Run the IRQ as a one-shot,
513 * this means it cannot be shared. */
514static irqreturn_t i2c_nuvoton_int_handler(int dummy, void *dev_id)
515{
516 struct tpm_chip *chip = dev_id;
517 struct priv_data *priv = chip->vendor.priv;
518
519 priv->intrs++;
520 wake_up(&chip->vendor.read_queue);
521 disable_irq_nosync(chip->vendor.irq);
522 return IRQ_HANDLED;
523}
524
525static int get_vid(struct i2c_client *client, u32 *res)
526{
527 static const u8 vid_did_rid_value[] = { 0x50, 0x10, 0xfe };
528 u32 temp;
529 s32 rc;
530
531 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
532 return -ENODEV;
533 rc = i2c_nuvoton_read_buf(client, TPM_VID_DID_RID, 4, (u8 *)&temp);
534 if (rc < 0)
535 return rc;
536
537 /* check WPCT301 values - ignore RID */
538 if (memcmp(&temp, vid_did_rid_value, sizeof(vid_did_rid_value))) {
539 /*
540 * f/w rev 2.81 has an issue where the VID_DID_RID is not
541 * reporting the right value. so give it another chance at
542 * offset 0x20 (FIFO_W).
543 */
544 rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_W, 4,
545 (u8 *) (&temp));
546 if (rc < 0)
547 return rc;
548
549 /* check WPCT301 values - ignore RID */
550 if (memcmp(&temp, vid_did_rid_value,
551 sizeof(vid_did_rid_value)))
552 return -ENODEV;
553 }
554
555 *res = temp;
556 return 0;
557}
558
559static int i2c_nuvoton_probe(struct i2c_client *client,
560 const struct i2c_device_id *id)
561{
562 int rc;
563 struct tpm_chip *chip;
564 struct device *dev = &client->dev;
565 u32 vid = 0;
566
567 rc = get_vid(client, &vid);
568 if (rc)
569 return rc;
570
571 dev_info(dev, "VID: %04X DID: %02X RID: %02X\n", (u16) vid,
572 (u8) (vid >> 16), (u8) (vid >> 24));
573
574 chip = tpm_register_hardware(dev, &tpm_i2c);
575 if (!chip) {
576 dev_err(dev, "%s() error in tpm_register_hardware\n", __func__);
577 return -ENODEV;
578 }
579
580 chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data),
581 GFP_KERNEL);
582 init_waitqueue_head(&chip->vendor.read_queue);
583 init_waitqueue_head(&chip->vendor.int_queue);
584
585 /* Default timeouts */
586 chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
587 chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT);
588 chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
589 chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
590
591 /*
592 * I2C intfcaps (interrupt capabilitieis) in the chip are hard coded to:
593 * TPM_INTF_INT_LEVEL_LOW | TPM_INTF_DATA_AVAIL_INT
594 * The IRQ should be set in the i2c_board_info (which is done
595 * automatically in of_i2c_register_devices, for device tree users */
596 chip->vendor.irq = client->irq;
597
598 if (chip->vendor.irq) {
599 dev_dbg(dev, "%s() chip-vendor.irq\n", __func__);
600 rc = devm_request_irq(dev, chip->vendor.irq,
601 i2c_nuvoton_int_handler,
602 IRQF_TRIGGER_LOW,
603 chip->vendor.miscdev.name,
604 chip);
605 if (rc) {
606 dev_err(dev, "%s() Unable to request irq: %d for use\n",
607 __func__, chip->vendor.irq);
608 chip->vendor.irq = 0;
609 } else {
610 /* Clear any pending interrupt */
611 i2c_nuvoton_ready(chip);
612 /* - wait for TPM_STS==0xA0 (stsValid, commandReady) */
613 rc = i2c_nuvoton_wait_for_stat(chip,
614 TPM_STS_COMMAND_READY,
615 TPM_STS_COMMAND_READY,
616 chip->vendor.timeout_b,
617 NULL);
618 if (rc == 0) {
619 /*
620 * TIS is in ready state
621 * write dummy byte to enter reception state
622 * TPM_DATA_FIFO_W <- rc (0)
623 */
624 rc = i2c_nuvoton_write_buf(client,
625 TPM_DATA_FIFO_W,
626 1, (u8 *) (&rc));
627 if (rc < 0)
628 goto out_err;
629 /* TPM_STS <- 0x40 (commandReady) */
630 i2c_nuvoton_ready(chip);
631 } else {
632 /*
633 * timeout_b reached - command was
634 * aborted. TIS should now be in idle state -
635 * only TPM_STS_VALID should be set
636 */
637 if (i2c_nuvoton_read_status(chip) !=
638 TPM_STS_VALID) {
639 rc = -EIO;
640 goto out_err;
641 }
642 }
643 }
644 }
645
646 if (tpm_get_timeouts(chip)) {
647 rc = -ENODEV;
648 goto out_err;
649 }
650
651 if (tpm_do_selftest(chip)) {
652 rc = -ENODEV;
653 goto out_err;
654 }
655
656 return 0;
657
658out_err:
659 tpm_dev_vendor_release(chip);
660 tpm_remove_hardware(chip->dev);
661 return rc;
662}
663
664static int i2c_nuvoton_remove(struct i2c_client *client)
665{
666 struct device *dev = &(client->dev);
667 struct tpm_chip *chip = dev_get_drvdata(dev);
668
669 if (chip)
670 tpm_dev_vendor_release(chip);
671 tpm_remove_hardware(dev);
672 kfree(chip);
673 return 0;
674}
675
676
677static const struct i2c_device_id i2c_nuvoton_id[] = {
678 {I2C_DRIVER_NAME, 0},
679 {}
680};
681MODULE_DEVICE_TABLE(i2c, i2c_nuvoton_id);
682
683#ifdef CONFIG_OF
684static const struct of_device_id i2c_nuvoton_of_match[] = {
685 {.compatible = "nuvoton,npct501"},
686 {.compatible = "winbond,wpct301"},
687 {},
688};
689MODULE_DEVICE_TABLE(of, i2c_nuvoton_of_match);
690#endif
691
692static SIMPLE_DEV_PM_OPS(i2c_nuvoton_pm_ops, tpm_pm_suspend, tpm_pm_resume);
693
694static struct i2c_driver i2c_nuvoton_driver = {
695 .id_table = i2c_nuvoton_id,
696 .probe = i2c_nuvoton_probe,
697 .remove = i2c_nuvoton_remove,
698 .driver = {
699 .name = I2C_DRIVER_NAME,
700 .owner = THIS_MODULE,
701 .pm = &i2c_nuvoton_pm_ops,
702 .of_match_table = of_match_ptr(i2c_nuvoton_of_match),
703 },
704};
705
706module_i2c_driver(i2c_nuvoton_driver);
707
708MODULE_AUTHOR("Dan Morav (dan.morav@nuvoton.com)");
709MODULE_DESCRIPTION("Nuvoton TPM I2C Driver");
710MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
index 5bb8e2ddd3b3..a0d6ceb5d005 100644
--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
+++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
@@ -584,7 +584,7 @@ static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
584static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); 584static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
585static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); 585static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
586static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); 586static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
587static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); 587static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
588static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); 588static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
589 589
590static struct attribute *stm_tpm_attrs[] = { 590static struct attribute *stm_tpm_attrs[] = {
@@ -746,8 +746,6 @@ tpm_st33_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
746 746
747 tpm_get_timeouts(chip); 747 tpm_get_timeouts(chip);
748 748
749 i2c_set_clientdata(client, chip);
750
751 dev_info(chip->dev, "TPM I2C Initialized\n"); 749 dev_info(chip->dev, "TPM I2C Initialized\n");
752 return 0; 750 return 0;
753_irq_set: 751_irq_set:
@@ -807,24 +805,18 @@ static int tpm_st33_i2c_remove(struct i2c_client *client)
807#ifdef CONFIG_PM_SLEEP 805#ifdef CONFIG_PM_SLEEP
808/* 806/*
809 * tpm_st33_i2c_pm_suspend suspend the TPM device 807 * tpm_st33_i2c_pm_suspend suspend the TPM device
810 * Added: Work around when suspend and no tpm application is running, suspend
811 * may fail because chip->data_buffer is not set (only set in tpm_open in Linux
812 * TPM core)
813 * @param: client, the i2c_client drescription (TPM I2C description). 808 * @param: client, the i2c_client drescription (TPM I2C description).
814 * @param: mesg, the power management message. 809 * @param: mesg, the power management message.
815 * @return: 0 in case of success. 810 * @return: 0 in case of success.
816 */ 811 */
817static int tpm_st33_i2c_pm_suspend(struct device *dev) 812static int tpm_st33_i2c_pm_suspend(struct device *dev)
818{ 813{
819 struct tpm_chip *chip = dev_get_drvdata(dev);
820 struct st33zp24_platform_data *pin_infos = dev->platform_data; 814 struct st33zp24_platform_data *pin_infos = dev->platform_data;
821 int ret = 0; 815 int ret = 0;
822 816
823 if (power_mgt) { 817 if (power_mgt) {
824 gpio_set_value(pin_infos->io_lpcpd, 0); 818 gpio_set_value(pin_infos->io_lpcpd, 0);
825 } else { 819 } else {
826 if (chip->data_buffer == NULL)
827 chip->data_buffer = pin_infos->tpm_i2c_buffer[0];
828 ret = tpm_pm_suspend(dev); 820 ret = tpm_pm_suspend(dev);
829 } 821 }
830 return ret; 822 return ret;
@@ -849,8 +841,6 @@ static int tpm_st33_i2c_pm_resume(struct device *dev)
849 TPM_STS_VALID) == TPM_STS_VALID, 841 TPM_STS_VALID) == TPM_STS_VALID,
850 chip->vendor.timeout_b); 842 chip->vendor.timeout_b);
851 } else { 843 } else {
852 if (chip->data_buffer == NULL)
853 chip->data_buffer = pin_infos->tpm_i2c_buffer[0];
854 ret = tpm_pm_resume(dev); 844 ret = tpm_pm_resume(dev);
855 if (!ret) 845 if (!ret)
856 tpm_do_selftest(chip); 846 tpm_do_selftest(chip);
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 56b07c35a13e..2783a42aa732 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -98,7 +98,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
98 98
99 if (count < len) { 99 if (count < len) {
100 dev_err(ibmvtpm->dev, 100 dev_err(ibmvtpm->dev,
101 "Invalid size in recv: count=%ld, crq_size=%d\n", 101 "Invalid size in recv: count=%zd, crq_size=%d\n",
102 count, len); 102 count, len);
103 return -EIO; 103 return -EIO;
104 } 104 }
@@ -136,7 +136,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
136 136
137 if (count > ibmvtpm->rtce_size) { 137 if (count > ibmvtpm->rtce_size) {
138 dev_err(ibmvtpm->dev, 138 dev_err(ibmvtpm->dev,
139 "Invalid size in send: count=%ld, rtce_size=%d\n", 139 "Invalid size in send: count=%zd, rtce_size=%d\n",
140 count, ibmvtpm->rtce_size); 140 count, ibmvtpm->rtce_size);
141 return -EIO; 141 return -EIO;
142 } 142 }
@@ -419,7 +419,7 @@ static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
419static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); 419static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
420static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, 420static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
421 NULL); 421 NULL);
422static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); 422static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
423static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); 423static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
424static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); 424static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
425static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); 425static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index 2168d15bc728..8e562dc65601 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -452,12 +452,8 @@ int tpm_add_ppi(struct kobject *parent)
452{ 452{
453 return sysfs_create_group(parent, &ppi_attr_grp); 453 return sysfs_create_group(parent, &ppi_attr_grp);
454} 454}
455EXPORT_SYMBOL_GPL(tpm_add_ppi);
456 455
457void tpm_remove_ppi(struct kobject *parent) 456void tpm_remove_ppi(struct kobject *parent)
458{ 457{
459 sysfs_remove_group(parent, &ppi_attr_grp); 458 sysfs_remove_group(parent, &ppi_attr_grp);
460} 459}
461EXPORT_SYMBOL_GPL(tpm_remove_ppi);
462
463MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 5796d0157ce0..1b74459c0723 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -448,7 +448,7 @@ static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
448static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); 448static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
449static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, 449static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
450 NULL); 450 NULL);
451static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); 451static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
452static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); 452static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
453static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); 453static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
454static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); 454static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 94c280d36e8b..c8ff4df81779 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -351,8 +351,6 @@ static int tpmfront_probe(struct xenbus_device *dev,
351 351
352 tpm_get_timeouts(priv->chip); 352 tpm_get_timeouts(priv->chip);
353 353
354 dev_set_drvdata(&dev->dev, priv->chip);
355
356 return rv; 354 return rv;
357} 355}
358 356
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index bdb953e15d2a..5c07a56962db 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -87,6 +87,7 @@ config ARM_ARCH_TIMER
87config ARM_ARCH_TIMER_EVTSTREAM 87config ARM_ARCH_TIMER_EVTSTREAM
88 bool "Support for ARM architected timer event stream generation" 88 bool "Support for ARM architected timer event stream generation"
89 default y if ARM_ARCH_TIMER 89 default y if ARM_ARCH_TIMER
90 depends on ARM_ARCH_TIMER
90 help 91 help
91 This option enables support for event stream generation based on 92 This option enables support for event stream generation based on
92 the ARM architected timer. It is used for waking up CPUs executing 93 the ARM architected timer. It is used for waking up CPUs executing
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 4aac9ee0d0c0..3cf12834681e 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -313,8 +313,20 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
313 goto err1; 313 goto err1;
314 } 314 }
315 315
316 return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev), 316 ret = clk_prepare(p->clk);
317 cfg->clockevent_rating); 317 if (ret < 0)
318 goto err2;
319
320 ret = sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
321 cfg->clockevent_rating);
322 if (ret < 0)
323 goto err3;
324
325 return 0;
326 err3:
327 clk_unprepare(p->clk);
328 err2:
329 clk_put(p->clk);
318 err1: 330 err1:
319 iounmap(p->mapbase); 331 iounmap(p->mapbase);
320 err0: 332 err0:
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 78b8dae49628..63557cda0a7d 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -472,12 +472,26 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
472 ret = PTR_ERR(p->clk); 472 ret = PTR_ERR(p->clk);
473 goto err1; 473 goto err1;
474 } 474 }
475
476 ret = clk_prepare(p->clk);
477 if (ret < 0)
478 goto err2;
479
475 p->cs_enabled = false; 480 p->cs_enabled = false;
476 p->enable_count = 0; 481 p->enable_count = 0;
477 482
478 return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev), 483 ret = sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
479 cfg->clockevent_rating, 484 cfg->clockevent_rating,
480 cfg->clocksource_rating); 485 cfg->clocksource_rating);
486 if (ret < 0)
487 goto err3;
488
489 return 0;
490
491 err3:
492 clk_unprepare(p->clk);
493 err2:
494 clk_put(p->clk);
481 err1: 495 err1:
482 iounmap(p->mapbase); 496 iounmap(p->mapbase);
483 err0: 497 err0:
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index c73fc2b74de2..18c5b9b16645 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -32,11 +32,23 @@
32#include <linux/atomic.h> 32#include <linux/atomic.h>
33#include <linux/pid_namespace.h> 33#include <linux/pid_namespace.h>
34 34
35#include <asm/unaligned.h>
36
37#include <linux/cn_proc.h> 35#include <linux/cn_proc.h>
38 36
39#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event)) 37/*
38 * Size of a cn_msg followed by a proc_event structure. Since the
39 * sizeof struct cn_msg is a multiple of 4 bytes, but not 8 bytes, we
40 * add one 4-byte word to the size here, and then start the actual
41 * cn_msg structure 4 bytes into the stack buffer. The result is that
42 * the immediately following proc_event structure is aligned to 8 bytes.
43 */
44#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4)
45
46/* See comment above; we test our assumption about sizeof struct cn_msg here. */
47static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
48{
49 BUILD_BUG_ON(sizeof(struct cn_msg) != 20);
50 return (struct cn_msg *)(buffer + 4);
51}
40 52
41static atomic_t proc_event_num_listeners = ATOMIC_INIT(0); 53static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
42static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC }; 54static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
@@ -56,19 +68,19 @@ void proc_fork_connector(struct task_struct *task)
56{ 68{
57 struct cn_msg *msg; 69 struct cn_msg *msg;
58 struct proc_event *ev; 70 struct proc_event *ev;
59 __u8 buffer[CN_PROC_MSG_SIZE]; 71 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
60 struct timespec ts; 72 struct timespec ts;
61 struct task_struct *parent; 73 struct task_struct *parent;
62 74
63 if (atomic_read(&proc_event_num_listeners) < 1) 75 if (atomic_read(&proc_event_num_listeners) < 1)
64 return; 76 return;
65 77
66 msg = (struct cn_msg *)buffer; 78 msg = buffer_to_cn_msg(buffer);
67 ev = (struct proc_event *)msg->data; 79 ev = (struct proc_event *)msg->data;
68 memset(&ev->event_data, 0, sizeof(ev->event_data)); 80 memset(&ev->event_data, 0, sizeof(ev->event_data));
69 get_seq(&msg->seq, &ev->cpu); 81 get_seq(&msg->seq, &ev->cpu);
70 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 82 ktime_get_ts(&ts); /* get high res monotonic timestamp */
71 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 83 ev->timestamp_ns = timespec_to_ns(&ts);
72 ev->what = PROC_EVENT_FORK; 84 ev->what = PROC_EVENT_FORK;
73 rcu_read_lock(); 85 rcu_read_lock();
74 parent = rcu_dereference(task->real_parent); 86 parent = rcu_dereference(task->real_parent);
@@ -91,17 +103,17 @@ void proc_exec_connector(struct task_struct *task)
91 struct cn_msg *msg; 103 struct cn_msg *msg;
92 struct proc_event *ev; 104 struct proc_event *ev;
93 struct timespec ts; 105 struct timespec ts;
94 __u8 buffer[CN_PROC_MSG_SIZE]; 106 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
95 107
96 if (atomic_read(&proc_event_num_listeners) < 1) 108 if (atomic_read(&proc_event_num_listeners) < 1)
97 return; 109 return;
98 110
99 msg = (struct cn_msg *)buffer; 111 msg = buffer_to_cn_msg(buffer);
100 ev = (struct proc_event *)msg->data; 112 ev = (struct proc_event *)msg->data;
101 memset(&ev->event_data, 0, sizeof(ev->event_data)); 113 memset(&ev->event_data, 0, sizeof(ev->event_data));
102 get_seq(&msg->seq, &ev->cpu); 114 get_seq(&msg->seq, &ev->cpu);
103 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 115 ktime_get_ts(&ts); /* get high res monotonic timestamp */
104 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 116 ev->timestamp_ns = timespec_to_ns(&ts);
105 ev->what = PROC_EVENT_EXEC; 117 ev->what = PROC_EVENT_EXEC;
106 ev->event_data.exec.process_pid = task->pid; 118 ev->event_data.exec.process_pid = task->pid;
107 ev->event_data.exec.process_tgid = task->tgid; 119 ev->event_data.exec.process_tgid = task->tgid;
@@ -117,14 +129,14 @@ void proc_id_connector(struct task_struct *task, int which_id)
117{ 129{
118 struct cn_msg *msg; 130 struct cn_msg *msg;
119 struct proc_event *ev; 131 struct proc_event *ev;
120 __u8 buffer[CN_PROC_MSG_SIZE]; 132 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
121 struct timespec ts; 133 struct timespec ts;
122 const struct cred *cred; 134 const struct cred *cred;
123 135
124 if (atomic_read(&proc_event_num_listeners) < 1) 136 if (atomic_read(&proc_event_num_listeners) < 1)
125 return; 137 return;
126 138
127 msg = (struct cn_msg *)buffer; 139 msg = buffer_to_cn_msg(buffer);
128 ev = (struct proc_event *)msg->data; 140 ev = (struct proc_event *)msg->data;
129 memset(&ev->event_data, 0, sizeof(ev->event_data)); 141 memset(&ev->event_data, 0, sizeof(ev->event_data));
130 ev->what = which_id; 142 ev->what = which_id;
@@ -145,7 +157,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
145 rcu_read_unlock(); 157 rcu_read_unlock();
146 get_seq(&msg->seq, &ev->cpu); 158 get_seq(&msg->seq, &ev->cpu);
147 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 159 ktime_get_ts(&ts); /* get high res monotonic timestamp */
148 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 160 ev->timestamp_ns = timespec_to_ns(&ts);
149 161
150 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 162 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
151 msg->ack = 0; /* not used */ 163 msg->ack = 0; /* not used */
@@ -159,17 +171,17 @@ void proc_sid_connector(struct task_struct *task)
159 struct cn_msg *msg; 171 struct cn_msg *msg;
160 struct proc_event *ev; 172 struct proc_event *ev;
161 struct timespec ts; 173 struct timespec ts;
162 __u8 buffer[CN_PROC_MSG_SIZE]; 174 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
163 175
164 if (atomic_read(&proc_event_num_listeners) < 1) 176 if (atomic_read(&proc_event_num_listeners) < 1)
165 return; 177 return;
166 178
167 msg = (struct cn_msg *)buffer; 179 msg = buffer_to_cn_msg(buffer);
168 ev = (struct proc_event *)msg->data; 180 ev = (struct proc_event *)msg->data;
169 memset(&ev->event_data, 0, sizeof(ev->event_data)); 181 memset(&ev->event_data, 0, sizeof(ev->event_data));
170 get_seq(&msg->seq, &ev->cpu); 182 get_seq(&msg->seq, &ev->cpu);
171 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 183 ktime_get_ts(&ts); /* get high res monotonic timestamp */
172 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 184 ev->timestamp_ns = timespec_to_ns(&ts);
173 ev->what = PROC_EVENT_SID; 185 ev->what = PROC_EVENT_SID;
174 ev->event_data.sid.process_pid = task->pid; 186 ev->event_data.sid.process_pid = task->pid;
175 ev->event_data.sid.process_tgid = task->tgid; 187 ev->event_data.sid.process_tgid = task->tgid;
@@ -186,17 +198,17 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
186 struct cn_msg *msg; 198 struct cn_msg *msg;
187 struct proc_event *ev; 199 struct proc_event *ev;
188 struct timespec ts; 200 struct timespec ts;
189 __u8 buffer[CN_PROC_MSG_SIZE]; 201 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
190 202
191 if (atomic_read(&proc_event_num_listeners) < 1) 203 if (atomic_read(&proc_event_num_listeners) < 1)
192 return; 204 return;
193 205
194 msg = (struct cn_msg *)buffer; 206 msg = buffer_to_cn_msg(buffer);
195 ev = (struct proc_event *)msg->data; 207 ev = (struct proc_event *)msg->data;
196 memset(&ev->event_data, 0, sizeof(ev->event_data)); 208 memset(&ev->event_data, 0, sizeof(ev->event_data));
197 get_seq(&msg->seq, &ev->cpu); 209 get_seq(&msg->seq, &ev->cpu);
198 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 210 ktime_get_ts(&ts); /* get high res monotonic timestamp */
199 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 211 ev->timestamp_ns = timespec_to_ns(&ts);
200 ev->what = PROC_EVENT_PTRACE; 212 ev->what = PROC_EVENT_PTRACE;
201 ev->event_data.ptrace.process_pid = task->pid; 213 ev->event_data.ptrace.process_pid = task->pid;
202 ev->event_data.ptrace.process_tgid = task->tgid; 214 ev->event_data.ptrace.process_tgid = task->tgid;
@@ -221,17 +233,17 @@ void proc_comm_connector(struct task_struct *task)
221 struct cn_msg *msg; 233 struct cn_msg *msg;
222 struct proc_event *ev; 234 struct proc_event *ev;
223 struct timespec ts; 235 struct timespec ts;
224 __u8 buffer[CN_PROC_MSG_SIZE]; 236 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
225 237
226 if (atomic_read(&proc_event_num_listeners) < 1) 238 if (atomic_read(&proc_event_num_listeners) < 1)
227 return; 239 return;
228 240
229 msg = (struct cn_msg *)buffer; 241 msg = buffer_to_cn_msg(buffer);
230 ev = (struct proc_event *)msg->data; 242 ev = (struct proc_event *)msg->data;
231 memset(&ev->event_data, 0, sizeof(ev->event_data)); 243 memset(&ev->event_data, 0, sizeof(ev->event_data));
232 get_seq(&msg->seq, &ev->cpu); 244 get_seq(&msg->seq, &ev->cpu);
233 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 245 ktime_get_ts(&ts); /* get high res monotonic timestamp */
234 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 246 ev->timestamp_ns = timespec_to_ns(&ts);
235 ev->what = PROC_EVENT_COMM; 247 ev->what = PROC_EVENT_COMM;
236 ev->event_data.comm.process_pid = task->pid; 248 ev->event_data.comm.process_pid = task->pid;
237 ev->event_data.comm.process_tgid = task->tgid; 249 ev->event_data.comm.process_tgid = task->tgid;
@@ -248,18 +260,18 @@ void proc_coredump_connector(struct task_struct *task)
248{ 260{
249 struct cn_msg *msg; 261 struct cn_msg *msg;
250 struct proc_event *ev; 262 struct proc_event *ev;
251 __u8 buffer[CN_PROC_MSG_SIZE]; 263 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
252 struct timespec ts; 264 struct timespec ts;
253 265
254 if (atomic_read(&proc_event_num_listeners) < 1) 266 if (atomic_read(&proc_event_num_listeners) < 1)
255 return; 267 return;
256 268
257 msg = (struct cn_msg *)buffer; 269 msg = buffer_to_cn_msg(buffer);
258 ev = (struct proc_event *)msg->data; 270 ev = (struct proc_event *)msg->data;
259 memset(&ev->event_data, 0, sizeof(ev->event_data)); 271 memset(&ev->event_data, 0, sizeof(ev->event_data));
260 get_seq(&msg->seq, &ev->cpu); 272 get_seq(&msg->seq, &ev->cpu);
261 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 273 ktime_get_ts(&ts); /* get high res monotonic timestamp */
262 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 274 ev->timestamp_ns = timespec_to_ns(&ts);
263 ev->what = PROC_EVENT_COREDUMP; 275 ev->what = PROC_EVENT_COREDUMP;
264 ev->event_data.coredump.process_pid = task->pid; 276 ev->event_data.coredump.process_pid = task->pid;
265 ev->event_data.coredump.process_tgid = task->tgid; 277 ev->event_data.coredump.process_tgid = task->tgid;
@@ -275,18 +287,18 @@ void proc_exit_connector(struct task_struct *task)
275{ 287{
276 struct cn_msg *msg; 288 struct cn_msg *msg;
277 struct proc_event *ev; 289 struct proc_event *ev;
278 __u8 buffer[CN_PROC_MSG_SIZE]; 290 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
279 struct timespec ts; 291 struct timespec ts;
280 292
281 if (atomic_read(&proc_event_num_listeners) < 1) 293 if (atomic_read(&proc_event_num_listeners) < 1)
282 return; 294 return;
283 295
284 msg = (struct cn_msg *)buffer; 296 msg = buffer_to_cn_msg(buffer);
285 ev = (struct proc_event *)msg->data; 297 ev = (struct proc_event *)msg->data;
286 memset(&ev->event_data, 0, sizeof(ev->event_data)); 298 memset(&ev->event_data, 0, sizeof(ev->event_data));
287 get_seq(&msg->seq, &ev->cpu); 299 get_seq(&msg->seq, &ev->cpu);
288 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 300 ktime_get_ts(&ts); /* get high res monotonic timestamp */
289 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 301 ev->timestamp_ns = timespec_to_ns(&ts);
290 ev->what = PROC_EVENT_EXIT; 302 ev->what = PROC_EVENT_EXIT;
291 ev->event_data.exit.process_pid = task->pid; 303 ev->event_data.exit.process_pid = task->pid;
292 ev->event_data.exit.process_tgid = task->tgid; 304 ev->event_data.exit.process_tgid = task->tgid;
@@ -312,18 +324,18 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
312{ 324{
313 struct cn_msg *msg; 325 struct cn_msg *msg;
314 struct proc_event *ev; 326 struct proc_event *ev;
315 __u8 buffer[CN_PROC_MSG_SIZE]; 327 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
316 struct timespec ts; 328 struct timespec ts;
317 329
318 if (atomic_read(&proc_event_num_listeners) < 1) 330 if (atomic_read(&proc_event_num_listeners) < 1)
319 return; 331 return;
320 332
321 msg = (struct cn_msg *)buffer; 333 msg = buffer_to_cn_msg(buffer);
322 ev = (struct proc_event *)msg->data; 334 ev = (struct proc_event *)msg->data;
323 memset(&ev->event_data, 0, sizeof(ev->event_data)); 335 memset(&ev->event_data, 0, sizeof(ev->event_data));
324 msg->seq = rcvd_seq; 336 msg->seq = rcvd_seq;
325 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 337 ktime_get_ts(&ts); /* get high res monotonic timestamp */
326 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 338 ev->timestamp_ns = timespec_to_ns(&ts);
327 ev->cpu = -1; 339 ev->cpu = -1;
328 ev->what = PROC_EVENT_NONE; 340 ev->what = PROC_EVENT_NONE;
329 ev->event_data.ack.err = err; 341 ev->event_data.ack.err = err;
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c
index 856ad80418ae..7c03dd84f66a 100644
--- a/drivers/cpufreq/at32ap-cpufreq.c
+++ b/drivers/cpufreq/at32ap-cpufreq.c
@@ -58,7 +58,7 @@ static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
58 return 0; 58 return 0;
59} 59}
60 60
61static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy) 61static int at32_cpufreq_driver_init(struct cpufreq_policy *policy)
62{ 62{
63 unsigned int frequency, rate, min_freq; 63 unsigned int frequency, rate, min_freq;
64 int retval, steps, i; 64 int retval, steps, i;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 218460fcd2e4..25a70d06c5bf 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -68,6 +68,9 @@ static void cs_check_cpu(int cpu, unsigned int load)
68 68
69 dbs_info->requested_freq += get_freq_target(cs_tuners, policy); 69 dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
70 70
71 if (dbs_info->requested_freq > policy->max)
72 dbs_info->requested_freq = policy->max;
73
71 __cpufreq_driver_target(policy, dbs_info->requested_freq, 74 __cpufreq_driver_target(policy, dbs_info->requested_freq,
72 CPUFREQ_RELATION_H); 75 CPUFREQ_RELATION_H);
73 return; 76 return;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 0806c31e5764..e6be63561fa6 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -328,10 +328,6 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
328 dbs_data->cdata->gov_dbs_timer); 328 dbs_data->cdata->gov_dbs_timer);
329 } 329 }
330 330
331 /*
332 * conservative does not implement micro like ondemand
333 * governor, thus we are bound to jiffes/HZ
334 */
335 if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { 331 if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
336 cs_dbs_info->down_skip = 0; 332 cs_dbs_info->down_skip = 0;
337 cs_dbs_info->enable = 1; 333 cs_dbs_info->enable = 1;
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index f2c75065ce19..dfd1643b0b2f 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -157,4 +157,3 @@ err_moutcore:
157 pr_debug("%s: failed initialization\n", __func__); 157 pr_debug("%s: failed initialization\n", __func__);
158 return -EINVAL; 158 return -EINVAL;
159} 159}
160EXPORT_SYMBOL(exynos4210_cpufreq_init);
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 8683304ce62c..efad5e657f6f 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -211,4 +211,3 @@ err_moutcore:
211 pr_debug("%s: failed initialization\n", __func__); 211 pr_debug("%s: failed initialization\n", __func__);
212 return -EINVAL; 212 return -EINVAL;
213} 213}
214EXPORT_SYMBOL(exynos4x12_cpufreq_init);
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index 9fae466d7746..8feda86fe42c 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -236,4 +236,3 @@ err_moutcore:
236 pr_err("%s: failed initialization\n", __func__); 236 pr_err("%s: failed initialization\n", __func__);
237 return -EINVAL; 237 return -EINVAL;
238} 238}
239EXPORT_SYMBOL(exynos5250_cpufreq_init);
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index be6d14307aa8..a0acd0bfba40 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -53,6 +53,7 @@ static unsigned int omap_getspeed(unsigned int cpu)
53 53
54static int omap_target(struct cpufreq_policy *policy, unsigned int index) 54static int omap_target(struct cpufreq_policy *policy, unsigned int index)
55{ 55{
56 int r, ret;
56 struct dev_pm_opp *opp; 57 struct dev_pm_opp *opp;
57 unsigned long freq, volt = 0, volt_old = 0, tol = 0; 58 unsigned long freq, volt = 0, volt_old = 0, tol = 0;
58 unsigned int old_freq, new_freq; 59 unsigned int old_freq, new_freq;
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index f42df7ec03c5..b7309c37033d 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -142,10 +142,8 @@ static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
142 142
143 mutex_lock(&tegra_cpu_lock); 143 mutex_lock(&tegra_cpu_lock);
144 144
145 if (is_suspended) { 145 if (is_suspended)
146 ret = -EBUSY;
147 goto out; 146 goto out;
148 }
149 147
150 freq = freq_table[index].frequency; 148 freq = freq_table[index].frequency;
151 149
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 2a991e468f78..a55e68f2cfc8 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -400,7 +400,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device);
400 */ 400 */
401void cpuidle_unregister_device(struct cpuidle_device *dev) 401void cpuidle_unregister_device(struct cpuidle_device *dev)
402{ 402{
403 if (dev->registered == 0) 403 if (!dev || dev->registered == 0)
404 return; 404 return;
405 405
406 cpuidle_pause_and_lock(); 406 cpuidle_pause_and_lock();
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index ca89f6b84b06..e7555ff4cafd 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -4,16 +4,29 @@ config CRYPTO_DEV_FSL_CAAM
4 help 4 help
5 Enables the driver module for Freescale's Cryptographic Accelerator 5 Enables the driver module for Freescale's Cryptographic Accelerator
6 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). 6 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
7 This module adds a job ring operation interface, and configures h/w 7 This module creates job ring devices, and configures h/w
8 to operate as a DPAA component automatically, depending 8 to operate as a DPAA component automatically, depending
9 on h/w feature availability. 9 on h/w feature availability.
10 10
11 To compile this driver as a module, choose M here: the module 11 To compile this driver as a module, choose M here: the module
12 will be called caam. 12 will be called caam.
13 13
14config CRYPTO_DEV_FSL_CAAM_JR
15 tristate "Freescale CAAM Job Ring driver backend"
16 depends on CRYPTO_DEV_FSL_CAAM
17 default y
18 help
19 Enables the driver module for Job Rings which are part of
20 Freescale's Cryptographic Accelerator
21 and Assurance Module (CAAM). This module adds a job ring operation
22 interface.
23
24 To compile this driver as a module, choose M here: the module
25 will be called caam_jr.
26
14config CRYPTO_DEV_FSL_CAAM_RINGSIZE 27config CRYPTO_DEV_FSL_CAAM_RINGSIZE
15 int "Job Ring size" 28 int "Job Ring size"
16 depends on CRYPTO_DEV_FSL_CAAM 29 depends on CRYPTO_DEV_FSL_CAAM_JR
17 range 2 9 30 range 2 9
18 default "9" 31 default "9"
19 help 32 help
@@ -31,7 +44,7 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
31 44
32config CRYPTO_DEV_FSL_CAAM_INTC 45config CRYPTO_DEV_FSL_CAAM_INTC
33 bool "Job Ring interrupt coalescing" 46 bool "Job Ring interrupt coalescing"
34 depends on CRYPTO_DEV_FSL_CAAM 47 depends on CRYPTO_DEV_FSL_CAAM_JR
35 default n 48 default n
36 help 49 help
37 Enable the Job Ring's interrupt coalescing feature. 50 Enable the Job Ring's interrupt coalescing feature.
@@ -62,7 +75,7 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
62 75
63config CRYPTO_DEV_FSL_CAAM_CRYPTO_API 76config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
64 tristate "Register algorithm implementations with the Crypto API" 77 tristate "Register algorithm implementations with the Crypto API"
65 depends on CRYPTO_DEV_FSL_CAAM 78 depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
66 default y 79 default y
67 select CRYPTO_ALGAPI 80 select CRYPTO_ALGAPI
68 select CRYPTO_AUTHENC 81 select CRYPTO_AUTHENC
@@ -76,7 +89,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
76 89
77config CRYPTO_DEV_FSL_CAAM_AHASH_API 90config CRYPTO_DEV_FSL_CAAM_AHASH_API
78 tristate "Register hash algorithm implementations with Crypto API" 91 tristate "Register hash algorithm implementations with Crypto API"
79 depends on CRYPTO_DEV_FSL_CAAM 92 depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
80 default y 93 default y
81 select CRYPTO_HASH 94 select CRYPTO_HASH
82 help 95 help
@@ -88,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
88 101
89config CRYPTO_DEV_FSL_CAAM_RNG_API 102config CRYPTO_DEV_FSL_CAAM_RNG_API
90 tristate "Register caam device for hwrng API" 103 tristate "Register caam device for hwrng API"
91 depends on CRYPTO_DEV_FSL_CAAM 104 depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
92 default y 105 default y
93 select CRYPTO_RNG 106 select CRYPTO_RNG
94 select HW_RANDOM 107 select HW_RANDOM
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index d56bd0ec65d8..550758a333e7 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -6,8 +6,10 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
6endif 6endif
7 7
8obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o 8obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
9obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
9obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o 10obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
10obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o 11obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
11obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o 12obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
12 13
13caam-objs := ctrl.o jr.o error.o key_gen.o 14caam-objs := ctrl.o
15caam_jr-objs := jr.o key_gen.o error.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 7c63b72ecd75..4cf5dec826e1 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -86,6 +86,7 @@
86#else 86#else
87#define debug(format, arg...) 87#define debug(format, arg...)
88#endif 88#endif
89static struct list_head alg_list;
89 90
90/* Set DK bit in class 1 operation if shared */ 91/* Set DK bit in class 1 operation if shared */
91static inline void append_dec_op1(u32 *desc, u32 type) 92static inline void append_dec_op1(u32 *desc, u32 type)
@@ -817,7 +818,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
817 ivsize, 1); 818 ivsize, 1);
818 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", 819 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
819 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst), 820 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
820 req->cryptlen, 1); 821 req->cryptlen - ctx->authsize, 1);
821#endif 822#endif
822 823
823 if (err) { 824 if (err) {
@@ -971,12 +972,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
971 (edesc->src_nents ? : 1); 972 (edesc->src_nents ? : 1);
972 in_options = LDST_SGF; 973 in_options = LDST_SGF;
973 } 974 }
974 if (encrypt) 975
975 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + 976 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
976 req->cryptlen - authsize, in_options); 977 in_options);
977 else
978 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
979 req->cryptlen, in_options);
980 978
981 if (likely(req->src == req->dst)) { 979 if (likely(req->src == req->dst)) {
982 if (all_contig) { 980 if (all_contig) {
@@ -997,7 +995,8 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
997 } 995 }
998 } 996 }
999 if (encrypt) 997 if (encrypt)
1000 append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options); 998 append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
999 out_options);
1001 else 1000 else
1002 append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize, 1001 append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
1003 out_options); 1002 out_options);
@@ -1047,8 +1046,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1047 sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents; 1046 sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
1048 in_options = LDST_SGF; 1047 in_options = LDST_SGF;
1049 } 1048 }
1050 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + 1049 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
1051 req->cryptlen - authsize, in_options); 1050 in_options);
1052 1051
1053 if (contig & GIV_DST_CONTIG) { 1052 if (contig & GIV_DST_CONTIG) {
1054 dst_dma = edesc->iv_dma; 1053 dst_dma = edesc->iv_dma;
@@ -1065,7 +1064,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1065 } 1064 }
1066 } 1065 }
1067 1066
1068 append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options); 1067 append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
1068 out_options);
1069} 1069}
1070 1070
1071/* 1071/*
@@ -1129,7 +1129,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1129 * allocate and map the aead extended descriptor 1129 * allocate and map the aead extended descriptor
1130 */ 1130 */
1131static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, 1131static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1132 int desc_bytes, bool *all_contig_ptr) 1132 int desc_bytes, bool *all_contig_ptr,
1133 bool encrypt)
1133{ 1134{
1134 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1135 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1135 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1136 struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -1144,12 +1145,22 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1144 bool assoc_chained = false, src_chained = false, dst_chained = false; 1145 bool assoc_chained = false, src_chained = false, dst_chained = false;
1145 int ivsize = crypto_aead_ivsize(aead); 1146 int ivsize = crypto_aead_ivsize(aead);
1146 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; 1147 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1148 unsigned int authsize = ctx->authsize;
1147 1149
1148 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); 1150 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1149 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1150 1151
1151 if (unlikely(req->dst != req->src)) 1152 if (unlikely(req->dst != req->src)) {
1152 dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained); 1153 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1154 dst_nents = sg_count(req->dst,
1155 req->cryptlen +
1156 (encrypt ? authsize : (-authsize)),
1157 &dst_chained);
1158 } else {
1159 src_nents = sg_count(req->src,
1160 req->cryptlen +
1161 (encrypt ? authsize : 0),
1162 &src_chained);
1163 }
1153 1164
1154 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, 1165 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1155 DMA_TO_DEVICE, assoc_chained); 1166 DMA_TO_DEVICE, assoc_chained);
@@ -1233,11 +1244,9 @@ static int aead_encrypt(struct aead_request *req)
1233 u32 *desc; 1244 u32 *desc;
1234 int ret = 0; 1245 int ret = 0;
1235 1246
1236 req->cryptlen += ctx->authsize;
1237
1238 /* allocate extended descriptor */ 1247 /* allocate extended descriptor */
1239 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * 1248 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1240 CAAM_CMD_SZ, &all_contig); 1249 CAAM_CMD_SZ, &all_contig, true);
1241 if (IS_ERR(edesc)) 1250 if (IS_ERR(edesc))
1242 return PTR_ERR(edesc); 1251 return PTR_ERR(edesc);
1243 1252
@@ -1274,7 +1283,7 @@ static int aead_decrypt(struct aead_request *req)
1274 1283
1275 /* allocate extended descriptor */ 1284 /* allocate extended descriptor */
1276 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * 1285 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1277 CAAM_CMD_SZ, &all_contig); 1286 CAAM_CMD_SZ, &all_contig, false);
1278 if (IS_ERR(edesc)) 1287 if (IS_ERR(edesc))
1279 return PTR_ERR(edesc); 1288 return PTR_ERR(edesc);
1280 1289
@@ -1331,7 +1340,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1331 src_nents = sg_count(req->src, req->cryptlen, &src_chained); 1340 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1332 1341
1333 if (unlikely(req->dst != req->src)) 1342 if (unlikely(req->dst != req->src))
1334 dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained); 1343 dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
1344 &dst_chained);
1335 1345
1336 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, 1346 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1337 DMA_TO_DEVICE, assoc_chained); 1347 DMA_TO_DEVICE, assoc_chained);
@@ -1425,8 +1435,6 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
1425 u32 *desc; 1435 u32 *desc;
1426 int ret = 0; 1436 int ret = 0;
1427 1437
1428 req->cryptlen += ctx->authsize;
1429
1430 /* allocate extended descriptor */ 1438 /* allocate extended descriptor */
1431 edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN * 1439 edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
1432 CAAM_CMD_SZ, &contig); 1440 CAAM_CMD_SZ, &contig);
@@ -2057,7 +2065,6 @@ static struct caam_alg_template driver_algs[] = {
2057 2065
2058struct caam_crypto_alg { 2066struct caam_crypto_alg {
2059 struct list_head entry; 2067 struct list_head entry;
2060 struct device *ctrldev;
2061 int class1_alg_type; 2068 int class1_alg_type;
2062 int class2_alg_type; 2069 int class2_alg_type;
2063 int alg_op; 2070 int alg_op;
@@ -2070,14 +2077,12 @@ static int caam_cra_init(struct crypto_tfm *tfm)
2070 struct caam_crypto_alg *caam_alg = 2077 struct caam_crypto_alg *caam_alg =
2071 container_of(alg, struct caam_crypto_alg, crypto_alg); 2078 container_of(alg, struct caam_crypto_alg, crypto_alg);
2072 struct caam_ctx *ctx = crypto_tfm_ctx(tfm); 2079 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2073 struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
2074 int tgt_jr = atomic_inc_return(&priv->tfm_count);
2075 2080
2076 /* 2081 ctx->jrdev = caam_jr_alloc();
2077 * distribute tfms across job rings to ensure in-order 2082 if (IS_ERR(ctx->jrdev)) {
2078 * crypto request processing per tfm 2083 pr_err("Job Ring Device allocation for transform failed\n");
2079 */ 2084 return PTR_ERR(ctx->jrdev);
2080 ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs]; 2085 }
2081 2086
2082 /* copy descriptor header template value */ 2087 /* copy descriptor header template value */
2083 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; 2088 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
@@ -2104,44 +2109,26 @@ static void caam_cra_exit(struct crypto_tfm *tfm)
2104 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma, 2109 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
2105 desc_bytes(ctx->sh_desc_givenc), 2110 desc_bytes(ctx->sh_desc_givenc),
2106 DMA_TO_DEVICE); 2111 DMA_TO_DEVICE);
2112
2113 caam_jr_free(ctx->jrdev);
2107} 2114}
2108 2115
2109static void __exit caam_algapi_exit(void) 2116static void __exit caam_algapi_exit(void)
2110{ 2117{
2111 2118
2112 struct device_node *dev_node;
2113 struct platform_device *pdev;
2114 struct device *ctrldev;
2115 struct caam_drv_private *priv;
2116 struct caam_crypto_alg *t_alg, *n; 2119 struct caam_crypto_alg *t_alg, *n;
2117 2120
2118 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 2121 if (!alg_list.next)
2119 if (!dev_node) {
2120 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2121 if (!dev_node)
2122 return;
2123 }
2124
2125 pdev = of_find_device_by_node(dev_node);
2126 if (!pdev)
2127 return;
2128
2129 ctrldev = &pdev->dev;
2130 of_node_put(dev_node);
2131 priv = dev_get_drvdata(ctrldev);
2132
2133 if (!priv->alg_list.next)
2134 return; 2122 return;
2135 2123
2136 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { 2124 list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
2137 crypto_unregister_alg(&t_alg->crypto_alg); 2125 crypto_unregister_alg(&t_alg->crypto_alg);
2138 list_del(&t_alg->entry); 2126 list_del(&t_alg->entry);
2139 kfree(t_alg); 2127 kfree(t_alg);
2140 } 2128 }
2141} 2129}
2142 2130
2143static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, 2131static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
2144 struct caam_alg_template
2145 *template) 2132 *template)
2146{ 2133{
2147 struct caam_crypto_alg *t_alg; 2134 struct caam_crypto_alg *t_alg;
@@ -2149,7 +2136,7 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
2149 2136
2150 t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL); 2137 t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
2151 if (!t_alg) { 2138 if (!t_alg) {
2152 dev_err(ctrldev, "failed to allocate t_alg\n"); 2139 pr_err("failed to allocate t_alg\n");
2153 return ERR_PTR(-ENOMEM); 2140 return ERR_PTR(-ENOMEM);
2154 } 2141 }
2155 2142
@@ -2181,62 +2168,39 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
2181 t_alg->class1_alg_type = template->class1_alg_type; 2168 t_alg->class1_alg_type = template->class1_alg_type;
2182 t_alg->class2_alg_type = template->class2_alg_type; 2169 t_alg->class2_alg_type = template->class2_alg_type;
2183 t_alg->alg_op = template->alg_op; 2170 t_alg->alg_op = template->alg_op;
2184 t_alg->ctrldev = ctrldev;
2185 2171
2186 return t_alg; 2172 return t_alg;
2187} 2173}
2188 2174
2189static int __init caam_algapi_init(void) 2175static int __init caam_algapi_init(void)
2190{ 2176{
2191 struct device_node *dev_node;
2192 struct platform_device *pdev;
2193 struct device *ctrldev;
2194 struct caam_drv_private *priv;
2195 int i = 0, err = 0; 2177 int i = 0, err = 0;
2196 2178
2197 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 2179 INIT_LIST_HEAD(&alg_list);
2198 if (!dev_node) {
2199 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2200 if (!dev_node)
2201 return -ENODEV;
2202 }
2203
2204 pdev = of_find_device_by_node(dev_node);
2205 if (!pdev)
2206 return -ENODEV;
2207
2208 ctrldev = &pdev->dev;
2209 priv = dev_get_drvdata(ctrldev);
2210 of_node_put(dev_node);
2211
2212 INIT_LIST_HEAD(&priv->alg_list);
2213
2214 atomic_set(&priv->tfm_count, -1);
2215 2180
2216 /* register crypto algorithms the device supports */ 2181 /* register crypto algorithms the device supports */
2217 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 2182 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2218 /* TODO: check if h/w supports alg */ 2183 /* TODO: check if h/w supports alg */
2219 struct caam_crypto_alg *t_alg; 2184 struct caam_crypto_alg *t_alg;
2220 2185
2221 t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]); 2186 t_alg = caam_alg_alloc(&driver_algs[i]);
2222 if (IS_ERR(t_alg)) { 2187 if (IS_ERR(t_alg)) {
2223 err = PTR_ERR(t_alg); 2188 err = PTR_ERR(t_alg);
2224 dev_warn(ctrldev, "%s alg allocation failed\n", 2189 pr_warn("%s alg allocation failed\n",
2225 driver_algs[i].driver_name); 2190 driver_algs[i].driver_name);
2226 continue; 2191 continue;
2227 } 2192 }
2228 2193
2229 err = crypto_register_alg(&t_alg->crypto_alg); 2194 err = crypto_register_alg(&t_alg->crypto_alg);
2230 if (err) { 2195 if (err) {
2231 dev_warn(ctrldev, "%s alg registration failed\n", 2196 pr_warn("%s alg registration failed\n",
2232 t_alg->crypto_alg.cra_driver_name); 2197 t_alg->crypto_alg.cra_driver_name);
2233 kfree(t_alg); 2198 kfree(t_alg);
2234 } else 2199 } else
2235 list_add_tail(&t_alg->entry, &priv->alg_list); 2200 list_add_tail(&t_alg->entry, &alg_list);
2236 } 2201 }
2237 if (!list_empty(&priv->alg_list)) 2202 if (!list_empty(&alg_list))
2238 dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n", 2203 pr_info("caam algorithms registered in /proc/crypto\n");
2239 (char *)of_get_property(dev_node, "compatible", NULL));
2240 2204
2241 return err; 2205 return err;
2242} 2206}
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index e732bd962e98..0378328f47a7 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -94,6 +94,9 @@
94#define debug(format, arg...) 94#define debug(format, arg...)
95#endif 95#endif
96 96
97
98static struct list_head hash_list;
99
97/* ahash per-session context */ 100/* ahash per-session context */
98struct caam_hash_ctx { 101struct caam_hash_ctx {
99 struct device *jrdev; 102 struct device *jrdev;
@@ -1653,7 +1656,6 @@ static struct caam_hash_template driver_hash[] = {
1653 1656
1654struct caam_hash_alg { 1657struct caam_hash_alg {
1655 struct list_head entry; 1658 struct list_head entry;
1656 struct device *ctrldev;
1657 int alg_type; 1659 int alg_type;
1658 int alg_op; 1660 int alg_op;
1659 struct ahash_alg ahash_alg; 1661 struct ahash_alg ahash_alg;
@@ -1670,7 +1672,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
1670 struct caam_hash_alg *caam_hash = 1672 struct caam_hash_alg *caam_hash =
1671 container_of(alg, struct caam_hash_alg, ahash_alg); 1673 container_of(alg, struct caam_hash_alg, ahash_alg);
1672 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1674 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1673 struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
1674 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ 1675 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
1675 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, 1676 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1676 HASH_MSG_LEN + SHA1_DIGEST_SIZE, 1677 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
@@ -1678,15 +1679,17 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
1678 HASH_MSG_LEN + SHA256_DIGEST_SIZE, 1679 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1679 HASH_MSG_LEN + 64, 1680 HASH_MSG_LEN + 64,
1680 HASH_MSG_LEN + SHA512_DIGEST_SIZE }; 1681 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1681 int tgt_jr = atomic_inc_return(&priv->tfm_count);
1682 int ret = 0; 1682 int ret = 0;
1683 1683
1684 /* 1684 /*
1685 * distribute tfms across job rings to ensure in-order 1685 * Get a Job ring from Job Ring driver to ensure in-order
1686 * crypto request processing per tfm 1686 * crypto request processing per tfm
1687 */ 1687 */
1688 ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs]; 1688 ctx->jrdev = caam_jr_alloc();
1689 1689 if (IS_ERR(ctx->jrdev)) {
1690 pr_err("Job Ring Device allocation for transform failed\n");
1691 return PTR_ERR(ctx->jrdev);
1692 }
1690 /* copy descriptor header template value */ 1693 /* copy descriptor header template value */
1691 ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; 1694 ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1692 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op; 1695 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
@@ -1729,35 +1732,18 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1729 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma)) 1732 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
1730 dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma, 1733 dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
1731 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE); 1734 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
1735
1736 caam_jr_free(ctx->jrdev);
1732} 1737}
1733 1738
1734static void __exit caam_algapi_hash_exit(void) 1739static void __exit caam_algapi_hash_exit(void)
1735{ 1740{
1736 struct device_node *dev_node;
1737 struct platform_device *pdev;
1738 struct device *ctrldev;
1739 struct caam_drv_private *priv;
1740 struct caam_hash_alg *t_alg, *n; 1741 struct caam_hash_alg *t_alg, *n;
1741 1742
1742 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 1743 if (!hash_list.next)
1743 if (!dev_node) {
1744 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1745 if (!dev_node)
1746 return;
1747 }
1748
1749 pdev = of_find_device_by_node(dev_node);
1750 if (!pdev)
1751 return; 1744 return;
1752 1745
1753 ctrldev = &pdev->dev; 1746 list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1754 of_node_put(dev_node);
1755 priv = dev_get_drvdata(ctrldev);
1756
1757 if (!priv->hash_list.next)
1758 return;
1759
1760 list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
1761 crypto_unregister_ahash(&t_alg->ahash_alg); 1747 crypto_unregister_ahash(&t_alg->ahash_alg);
1762 list_del(&t_alg->entry); 1748 list_del(&t_alg->entry);
1763 kfree(t_alg); 1749 kfree(t_alg);
@@ -1765,7 +1751,7 @@ static void __exit caam_algapi_hash_exit(void)
1765} 1751}
1766 1752
1767static struct caam_hash_alg * 1753static struct caam_hash_alg *
1768caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template, 1754caam_hash_alloc(struct caam_hash_template *template,
1769 bool keyed) 1755 bool keyed)
1770{ 1756{
1771 struct caam_hash_alg *t_alg; 1757 struct caam_hash_alg *t_alg;
@@ -1774,7 +1760,7 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
1774 1760
1775 t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL); 1761 t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
1776 if (!t_alg) { 1762 if (!t_alg) {
1777 dev_err(ctrldev, "failed to allocate t_alg\n"); 1763 pr_err("failed to allocate t_alg\n");
1778 return ERR_PTR(-ENOMEM); 1764 return ERR_PTR(-ENOMEM);
1779 } 1765 }
1780 1766
@@ -1805,37 +1791,15 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
1805 1791
1806 t_alg->alg_type = template->alg_type; 1792 t_alg->alg_type = template->alg_type;
1807 t_alg->alg_op = template->alg_op; 1793 t_alg->alg_op = template->alg_op;
1808 t_alg->ctrldev = ctrldev;
1809 1794
1810 return t_alg; 1795 return t_alg;
1811} 1796}
1812 1797
1813static int __init caam_algapi_hash_init(void) 1798static int __init caam_algapi_hash_init(void)
1814{ 1799{
1815 struct device_node *dev_node;
1816 struct platform_device *pdev;
1817 struct device *ctrldev;
1818 struct caam_drv_private *priv;
1819 int i = 0, err = 0; 1800 int i = 0, err = 0;
1820 1801
1821 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 1802 INIT_LIST_HEAD(&hash_list);
1822 if (!dev_node) {
1823 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1824 if (!dev_node)
1825 return -ENODEV;
1826 }
1827
1828 pdev = of_find_device_by_node(dev_node);
1829 if (!pdev)
1830 return -ENODEV;
1831
1832 ctrldev = &pdev->dev;
1833 priv = dev_get_drvdata(ctrldev);
1834 of_node_put(dev_node);
1835
1836 INIT_LIST_HEAD(&priv->hash_list);
1837
1838 atomic_set(&priv->tfm_count, -1);
1839 1803
1840 /* register crypto algorithms the device supports */ 1804 /* register crypto algorithms the device supports */
1841 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { 1805 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
@@ -1843,38 +1807,38 @@ static int __init caam_algapi_hash_init(void)
1843 struct caam_hash_alg *t_alg; 1807 struct caam_hash_alg *t_alg;
1844 1808
1845 /* register hmac version */ 1809 /* register hmac version */
1846 t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true); 1810 t_alg = caam_hash_alloc(&driver_hash[i], true);
1847 if (IS_ERR(t_alg)) { 1811 if (IS_ERR(t_alg)) {
1848 err = PTR_ERR(t_alg); 1812 err = PTR_ERR(t_alg);
1849 dev_warn(ctrldev, "%s alg allocation failed\n", 1813 pr_warn("%s alg allocation failed\n",
1850 driver_hash[i].driver_name); 1814 driver_hash[i].driver_name);
1851 continue; 1815 continue;
1852 } 1816 }
1853 1817
1854 err = crypto_register_ahash(&t_alg->ahash_alg); 1818 err = crypto_register_ahash(&t_alg->ahash_alg);
1855 if (err) { 1819 if (err) {
1856 dev_warn(ctrldev, "%s alg registration failed\n", 1820 pr_warn("%s alg registration failed\n",
1857 t_alg->ahash_alg.halg.base.cra_driver_name); 1821 t_alg->ahash_alg.halg.base.cra_driver_name);
1858 kfree(t_alg); 1822 kfree(t_alg);
1859 } else 1823 } else
1860 list_add_tail(&t_alg->entry, &priv->hash_list); 1824 list_add_tail(&t_alg->entry, &hash_list);
1861 1825
1862 /* register unkeyed version */ 1826 /* register unkeyed version */
1863 t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false); 1827 t_alg = caam_hash_alloc(&driver_hash[i], false);
1864 if (IS_ERR(t_alg)) { 1828 if (IS_ERR(t_alg)) {
1865 err = PTR_ERR(t_alg); 1829 err = PTR_ERR(t_alg);
1866 dev_warn(ctrldev, "%s alg allocation failed\n", 1830 pr_warn("%s alg allocation failed\n",
1867 driver_hash[i].driver_name); 1831 driver_hash[i].driver_name);
1868 continue; 1832 continue;
1869 } 1833 }
1870 1834
1871 err = crypto_register_ahash(&t_alg->ahash_alg); 1835 err = crypto_register_ahash(&t_alg->ahash_alg);
1872 if (err) { 1836 if (err) {
1873 dev_warn(ctrldev, "%s alg registration failed\n", 1837 pr_warn("%s alg registration failed\n",
1874 t_alg->ahash_alg.halg.base.cra_driver_name); 1838 t_alg->ahash_alg.halg.base.cra_driver_name);
1875 kfree(t_alg); 1839 kfree(t_alg);
1876 } else 1840 } else
1877 list_add_tail(&t_alg->entry, &priv->hash_list); 1841 list_add_tail(&t_alg->entry, &hash_list);
1878 } 1842 }
1879 1843
1880 return err; 1844 return err;
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index d1939a9539c0..28486b19fc36 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -273,34 +273,23 @@ static struct hwrng caam_rng = {
273 273
274static void __exit caam_rng_exit(void) 274static void __exit caam_rng_exit(void)
275{ 275{
276 caam_jr_free(rng_ctx.jrdev);
276 hwrng_unregister(&caam_rng); 277 hwrng_unregister(&caam_rng);
277} 278}
278 279
279static int __init caam_rng_init(void) 280static int __init caam_rng_init(void)
280{ 281{
281 struct device_node *dev_node; 282 struct device *dev;
282 struct platform_device *pdev;
283 struct device *ctrldev;
284 struct caam_drv_private *priv;
285
286 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
287 if (!dev_node) {
288 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
289 if (!dev_node)
290 return -ENODEV;
291 }
292
293 pdev = of_find_device_by_node(dev_node);
294 if (!pdev)
295 return -ENODEV;
296 283
297 ctrldev = &pdev->dev; 284 dev = caam_jr_alloc();
298 priv = dev_get_drvdata(ctrldev); 285 if (IS_ERR(dev)) {
299 of_node_put(dev_node); 286 pr_err("Job Ring Device allocation for transform failed\n");
287 return PTR_ERR(dev);
288 }
300 289
301 caam_init_rng(&rng_ctx, priv->jrdev[0]); 290 caam_init_rng(&rng_ctx, dev);
302 291
303 dev_info(priv->jrdev[0], "registering rng-caam\n"); 292 dev_info(dev, "registering rng-caam\n");
304 return hwrng_register(&caam_rng); 293 return hwrng_register(&caam_rng);
305} 294}
306 295
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index bc6d820812b6..63fb1af2c431 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -16,82 +16,75 @@
16#include "error.h" 16#include "error.h"
17#include "ctrl.h" 17#include "ctrl.h"
18 18
19static int caam_remove(struct platform_device *pdev)
20{
21 struct device *ctrldev;
22 struct caam_drv_private *ctrlpriv;
23 struct caam_drv_private_jr *jrpriv;
24 struct caam_full __iomem *topregs;
25 int ring, ret = 0;
26
27 ctrldev = &pdev->dev;
28 ctrlpriv = dev_get_drvdata(ctrldev);
29 topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
30
31 /* shut down JobRs */
32 for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
33 ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
34 jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
35 irq_dispose_mapping(jrpriv->irq);
36 }
37
38 /* Shut down debug views */
39#ifdef CONFIG_DEBUG_FS
40 debugfs_remove_recursive(ctrlpriv->dfs_root);
41#endif
42
43 /* Unmap controller region */
44 iounmap(&topregs->ctrl);
45
46 kfree(ctrlpriv->jrdev);
47 kfree(ctrlpriv);
48
49 return ret;
50}
51
52/* 19/*
53 * Descriptor to instantiate RNG State Handle 0 in normal mode and 20 * Descriptor to instantiate RNG State Handle 0 in normal mode and
54 * load the JDKEK, TDKEK and TDSK registers 21 * load the JDKEK, TDKEK and TDSK registers
55 */ 22 */
56static void build_instantiation_desc(u32 *desc) 23static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
57{ 24{
58 u32 *jump_cmd; 25 u32 *jump_cmd, op_flags;
59 26
60 init_job_desc(desc, 0); 27 init_job_desc(desc, 0);
61 28
29 op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
30 (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
31
62 /* INIT RNG in non-test mode */ 32 /* INIT RNG in non-test mode */
63 append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | 33 append_operation(desc, op_flags);
64 OP_ALG_AS_INIT); 34
35 if (!handle && do_sk) {
36 /*
37 * For SH0, Secure Keys must be generated as well
38 */
39
40 /* wait for done */
41 jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
42 set_jump_tgt_here(desc, jump_cmd);
43
44 /*
45 * load 1 to clear written reg:
46 * resets the done interrrupt and returns the RNG to idle.
47 */
48 append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
49
50 /* Initialize State Handle */
51 append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
52 OP_ALG_AAI_RNG4_SK);
53 }
65 54
66 /* wait for done */ 55 append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
67 jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1); 56}
68 set_jump_tgt_here(desc, jump_cmd);
69 57
70 /* 58/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
71 * load 1 to clear written reg: 59static void build_deinstantiation_desc(u32 *desc, int handle)
72 * resets the done interrupt and returns the RNG to idle. 60{
73 */ 61 init_job_desc(desc, 0);
74 append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
75 62
76 /* generate secure keys (non-test) */ 63 /* Uninstantiate State Handle 0 */
77 append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | 64 append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
78 OP_ALG_RNG4_SK); 65 (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);
66
67 append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
79} 68}
80 69
81static int instantiate_rng(struct device *ctrldev) 70/*
71 * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
72 * the software (no JR/QI used).
73 * @ctrldev - pointer to device
74 * @status - descriptor status, after being run
75 *
76 * Return: - 0 if no error occurred
77 * - -ENODEV if the DECO couldn't be acquired
78 * - -EAGAIN if an error occurred while executing the descriptor
79 */
80static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
81 u32 *status)
82{ 82{
83 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); 83 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
84 struct caam_full __iomem *topregs; 84 struct caam_full __iomem *topregs;
85 unsigned int timeout = 100000; 85 unsigned int timeout = 100000;
86 u32 *desc; 86 u32 deco_dbg_reg, flags;
87 int i, ret = 0; 87 int i;
88
89 desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA);
90 if (!desc) {
91 dev_err(ctrldev, "can't allocate RNG init descriptor memory\n");
92 return -ENOMEM;
93 }
94 build_instantiation_desc(desc);
95 88
96 /* Set the bit to request direct access to DECO0 */ 89 /* Set the bit to request direct access to DECO0 */
97 topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; 90 topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
@@ -103,36 +96,219 @@ static int instantiate_rng(struct device *ctrldev)
103 96
104 if (!timeout) { 97 if (!timeout) {
105 dev_err(ctrldev, "failed to acquire DECO 0\n"); 98 dev_err(ctrldev, "failed to acquire DECO 0\n");
106 ret = -EIO; 99 clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
107 goto out; 100 return -ENODEV;
108 } 101 }
109 102
110 for (i = 0; i < desc_len(desc); i++) 103 for (i = 0; i < desc_len(desc); i++)
111 topregs->deco.descbuf[i] = *(desc + i); 104 wr_reg32(&topregs->deco.descbuf[i], *(desc + i));
105
106 flags = DECO_JQCR_WHL;
107 /*
108 * If the descriptor length is longer than 4 words, then the
109 * FOUR bit in JRCTRL register must be set.
110 */
111 if (desc_len(desc) >= 4)
112 flags |= DECO_JQCR_FOUR;
112 113
113 wr_reg32(&topregs->deco.jr_ctl_hi, DECO_JQCR_WHL | DECO_JQCR_FOUR); 114 /* Instruct the DECO to execute it */
115 wr_reg32(&topregs->deco.jr_ctl_hi, flags);
114 116
115 timeout = 10000000; 117 timeout = 10000000;
116 while ((rd_reg32(&topregs->deco.desc_dbg) & DECO_DBG_VALID) && 118 do {
117 --timeout) 119 deco_dbg_reg = rd_reg32(&topregs->deco.desc_dbg);
120 /*
121 * If an error occured in the descriptor, then
122 * the DECO status field will be set to 0x0D
123 */
124 if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
125 DESC_DBG_DECO_STAT_HOST_ERR)
126 break;
118 cpu_relax(); 127 cpu_relax();
128 } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
119 129
120 if (!timeout) { 130 *status = rd_reg32(&topregs->deco.op_status_hi) &
121 dev_err(ctrldev, "failed to instantiate RNG\n"); 131 DECO_OP_STATUS_HI_ERR_MASK;
122 ret = -EIO;
123 }
124 132
133 /* Mark the DECO as free */
125 clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); 134 clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
126out: 135
136 if (!timeout)
137 return -EAGAIN;
138
139 return 0;
140}
141
142/*
143 * instantiate_rng - builds and executes a descriptor on DECO0,
144 * which initializes the RNG block.
145 * @ctrldev - pointer to device
146 * @state_handle_mask - bitmask containing the instantiation status
147 * for the RNG4 state handles which exist in
148 * the RNG4 block: 1 if it's been instantiated
149 * by an external entry, 0 otherwise.
150 * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK;
151 * Caution: this can be done only once; if the keys need to be
152 * regenerated, a POR is required
153 *
154 * Return: - 0 if no error occurred
155 * - -ENOMEM if there isn't enough memory to allocate the descriptor
156 * - -ENODEV if DECO0 couldn't be acquired
157 * - -EAGAIN if an error occurred when executing the descriptor
158 * f.i. there was a RNG hardware error due to not "good enough"
159 * entropy being aquired.
160 */
161static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
162 int gen_sk)
163{
164 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
165 struct caam_full __iomem *topregs;
166 struct rng4tst __iomem *r4tst;
167 u32 *desc, status, rdsta_val;
168 int ret = 0, sh_idx;
169
170 topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
171 r4tst = &topregs->ctrl.r4tst[0];
172
173 desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
174 if (!desc)
175 return -ENOMEM;
176
177 for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
178 /*
179 * If the corresponding bit is set, this state handle
180 * was initialized by somebody else, so it's left alone.
181 */
182 if ((1 << sh_idx) & state_handle_mask)
183 continue;
184
185 /* Create the descriptor for instantiating RNG State Handle */
186 build_instantiation_desc(desc, sh_idx, gen_sk);
187
188 /* Try to run it through DECO0 */
189 ret = run_descriptor_deco0(ctrldev, desc, &status);
190
191 /*
192 * If ret is not 0, or descriptor status is not 0, then
193 * something went wrong. No need to try the next state
194 * handle (if available), bail out here.
195 * Also, if for some reason, the State Handle didn't get
196 * instantiated although the descriptor has finished
197 * without any error (HW optimizations for later
198 * CAAM eras), then try again.
199 */
200 rdsta_val =
201 rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IFMASK;
202 if (status || !(rdsta_val & (1 << sh_idx)))
203 ret = -EAGAIN;
204 if (ret)
205 break;
206
207 dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
208 /* Clear the contents before recreating the descriptor */
209 memset(desc, 0x00, CAAM_CMD_SZ * 7);
210 }
211
127 kfree(desc); 212 kfree(desc);
213
128 return ret; 214 return ret;
129} 215}
130 216
131/* 217/*
132 * By default, the TRNG runs for 200 clocks per sample; 218 * deinstantiate_rng - builds and executes a descriptor on DECO0,
133 * 1600 clocks per sample generates better entropy. 219 * which deinitializes the RNG block.
220 * @ctrldev - pointer to device
221 * @state_handle_mask - bitmask containing the instantiation status
222 * for the RNG4 state handles which exist in
223 * the RNG4 block: 1 if it's been instantiated
224 *
225 * Return: - 0 if no error occurred
226 * - -ENOMEM if there isn't enough memory to allocate the descriptor
227 * - -ENODEV if DECO0 couldn't be acquired
228 * - -EAGAIN if an error occurred when executing the descriptor
134 */ 229 */
135static void kick_trng(struct platform_device *pdev) 230static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
231{
232 u32 *desc, status;
233 int sh_idx, ret = 0;
234
235 desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
236 if (!desc)
237 return -ENOMEM;
238
239 for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
240 /*
241 * If the corresponding bit is set, then it means the state
242 * handle was initialized by us, and thus it needs to be
243 * deintialized as well
244 */
245 if ((1 << sh_idx) & state_handle_mask) {
246 /*
247 * Create the descriptor for deinstantating this state
248 * handle
249 */
250 build_deinstantiation_desc(desc, sh_idx);
251
252 /* Try to run it through DECO0 */
253 ret = run_descriptor_deco0(ctrldev, desc, &status);
254
255 if (ret || status) {
256 dev_err(ctrldev,
257 "Failed to deinstantiate RNG4 SH%d\n",
258 sh_idx);
259 break;
260 }
261 dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
262 }
263 }
264
265 kfree(desc);
266
267 return ret;
268}
269
270static int caam_remove(struct platform_device *pdev)
271{
272 struct device *ctrldev;
273 struct caam_drv_private *ctrlpriv;
274 struct caam_full __iomem *topregs;
275 int ring, ret = 0;
276
277 ctrldev = &pdev->dev;
278 ctrlpriv = dev_get_drvdata(ctrldev);
279 topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
280
281 /* Remove platform devices for JobRs */
282 for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
283 if (ctrlpriv->jrpdev[ring])
284 of_device_unregister(ctrlpriv->jrpdev[ring]);
285 }
286
287 /* De-initialize RNG state handles initialized by this driver. */
288 if (ctrlpriv->rng4_sh_init)
289 deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
290
291 /* Shut down debug views */
292#ifdef CONFIG_DEBUG_FS
293 debugfs_remove_recursive(ctrlpriv->dfs_root);
294#endif
295
296 /* Unmap controller region */
297 iounmap(&topregs->ctrl);
298
299 kfree(ctrlpriv->jrpdev);
300 kfree(ctrlpriv);
301
302 return ret;
303}
304
305/*
306 * kick_trng - sets the various parameters for enabling the initialization
307 * of the RNG4 block in CAAM
308 * @pdev - pointer to the platform device
309 * @ent_delay - Defines the length (in system clocks) of each entropy sample.
310 */
311static void kick_trng(struct platform_device *pdev, int ent_delay)
136{ 312{
137 struct device *ctrldev = &pdev->dev; 313 struct device *ctrldev = &pdev->dev;
138 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); 314 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
@@ -145,14 +321,31 @@ static void kick_trng(struct platform_device *pdev)
145 321
146 /* put RNG4 into program mode */ 322 /* put RNG4 into program mode */
147 setbits32(&r4tst->rtmctl, RTMCTL_PRGM); 323 setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
148 /* 1600 clocks per sample */ 324
325 /*
326 * Performance-wise, it does not make sense to
327 * set the delay to a value that is lower
328 * than the last one that worked (i.e. the state handles
329 * were instantiated properly. Thus, instead of wasting
330 * time trying to set the values controlling the sample
331 * frequency, the function simply returns.
332 */
333 val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
334 >> RTSDCTL_ENT_DLY_SHIFT;
335 if (ent_delay <= val) {
336 /* put RNG4 into run mode */
337 clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
338 return;
339 }
340
149 val = rd_reg32(&r4tst->rtsdctl); 341 val = rd_reg32(&r4tst->rtsdctl);
150 val = (val & ~RTSDCTL_ENT_DLY_MASK) | (1600 << RTSDCTL_ENT_DLY_SHIFT); 342 val = (val & ~RTSDCTL_ENT_DLY_MASK) |
343 (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
151 wr_reg32(&r4tst->rtsdctl, val); 344 wr_reg32(&r4tst->rtsdctl, val);
152 /* min. freq. count */ 345 /* min. freq. count, equal to 1/4 of the entropy sample length */
153 wr_reg32(&r4tst->rtfrqmin, 400); 346 wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
154 /* max. freq. count */ 347 /* max. freq. count, equal to 8 times the entropy sample length */
155 wr_reg32(&r4tst->rtfrqmax, 6400); 348 wr_reg32(&r4tst->rtfrqmax, ent_delay << 3);
156 /* put RNG4 into run mode */ 349 /* put RNG4 into run mode */
157 clrbits32(&r4tst->rtmctl, RTMCTL_PRGM); 350 clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
158} 351}
@@ -193,7 +386,7 @@ EXPORT_SYMBOL(caam_get_era);
193/* Probe routine for CAAM top (controller) level */ 386/* Probe routine for CAAM top (controller) level */
194static int caam_probe(struct platform_device *pdev) 387static int caam_probe(struct platform_device *pdev)
195{ 388{
196 int ret, ring, rspec; 389 int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
197 u64 caam_id; 390 u64 caam_id;
198 struct device *dev; 391 struct device *dev;
199 struct device_node *nprop, *np; 392 struct device_node *nprop, *np;
@@ -258,8 +451,9 @@ static int caam_probe(struct platform_device *pdev)
258 rspec++; 451 rspec++;
259 } 452 }
260 453
261 ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL); 454 ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec,
262 if (ctrlpriv->jrdev == NULL) { 455 GFP_KERNEL);
456 if (ctrlpriv->jrpdev == NULL) {
263 iounmap(&topregs->ctrl); 457 iounmap(&topregs->ctrl);
264 return -ENOMEM; 458 return -ENOMEM;
265 } 459 }
@@ -267,13 +461,24 @@ static int caam_probe(struct platform_device *pdev)
267 ring = 0; 461 ring = 0;
268 ctrlpriv->total_jobrs = 0; 462 ctrlpriv->total_jobrs = 0;
269 for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") { 463 for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
270 caam_jr_probe(pdev, np, ring); 464 ctrlpriv->jrpdev[ring] =
465 of_platform_device_create(np, NULL, dev);
466 if (!ctrlpriv->jrpdev[ring]) {
467 pr_warn("JR%d Platform device creation error\n", ring);
468 continue;
469 }
271 ctrlpriv->total_jobrs++; 470 ctrlpriv->total_jobrs++;
272 ring++; 471 ring++;
273 } 472 }
274 if (!ring) { 473 if (!ring) {
275 for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") { 474 for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
276 caam_jr_probe(pdev, np, ring); 475 ctrlpriv->jrpdev[ring] =
476 of_platform_device_create(np, NULL, dev);
477 if (!ctrlpriv->jrpdev[ring]) {
478 pr_warn("JR%d Platform device creation error\n",
479 ring);
480 continue;
481 }
277 ctrlpriv->total_jobrs++; 482 ctrlpriv->total_jobrs++;
278 ring++; 483 ring++;
279 } 484 }
@@ -299,16 +504,55 @@ static int caam_probe(struct platform_device *pdev)
299 504
300 /* 505 /*
301 * If SEC has RNG version >= 4 and RNG state handle has not been 506 * If SEC has RNG version >= 4 and RNG state handle has not been
302 * already instantiated ,do RNG instantiation 507 * already instantiated, do RNG instantiation
303 */ 508 */
304 if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 && 509 if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) {
305 !(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) { 510 ctrlpriv->rng4_sh_init =
306 kick_trng(pdev); 511 rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
307 ret = instantiate_rng(dev); 512 /*
513 * If the secure keys (TDKEK, JDKEK, TDSK), were already
514 * generated, signal this to the function that is instantiating
515 * the state handles. An error would occur if RNG4 attempts
516 * to regenerate these keys before the next POR.
517 */
518 gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
519 ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
520 do {
521 int inst_handles =
522 rd_reg32(&topregs->ctrl.r4tst[0].rdsta) &
523 RDSTA_IFMASK;
524 /*
525 * If either SH were instantiated by somebody else
526 * (e.g. u-boot) then it is assumed that the entropy
527 * parameters are properly set and thus the function
528 * setting these (kick_trng(...)) is skipped.
529 * Also, if a handle was instantiated, do not change
530 * the TRNG parameters.
531 */
532 if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
533 kick_trng(pdev, ent_delay);
534 ent_delay += 400;
535 }
536 /*
537 * if instantiate_rng(...) fails, the loop will rerun
538 * and the kick_trng(...) function will modfiy the
539 * upper and lower limits of the entropy sampling
540 * interval, leading to a sucessful initialization of
541 * the RNG.
542 */
543 ret = instantiate_rng(dev, inst_handles,
544 gen_sk);
545 } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
308 if (ret) { 546 if (ret) {
547 dev_err(dev, "failed to instantiate RNG");
309 caam_remove(pdev); 548 caam_remove(pdev);
310 return ret; 549 return ret;
311 } 550 }
551 /*
552 * Set handles init'ed by this module as the complement of the
553 * already initialized ones
554 */
555 ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
312 556
313 /* Enable RDB bit so that RNG works faster */ 557 /* Enable RDB bit so that RNG works faster */
314 setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE); 558 setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE);
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 53b296f78b0d..7e4500f18df6 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -1155,8 +1155,15 @@ struct sec4_sg_entry {
1155 1155
1156/* randomizer AAI set */ 1156/* randomizer AAI set */
1157#define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT) 1157#define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT)
1158#define OP_ALG_AAI_RNG_NOZERO (0x10 << OP_ALG_AAI_SHIFT) 1158#define OP_ALG_AAI_RNG_NZB (0x10 << OP_ALG_AAI_SHIFT)
1159#define OP_ALG_AAI_RNG_ODD (0x20 << OP_ALG_AAI_SHIFT) 1159#define OP_ALG_AAI_RNG_OBP (0x20 << OP_ALG_AAI_SHIFT)
1160
1161/* RNG4 AAI set */
1162#define OP_ALG_AAI_RNG4_SH_0 (0x00 << OP_ALG_AAI_SHIFT)
1163#define OP_ALG_AAI_RNG4_SH_1 (0x01 << OP_ALG_AAI_SHIFT)
1164#define OP_ALG_AAI_RNG4_PS (0x40 << OP_ALG_AAI_SHIFT)
1165#define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
1166#define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
1160 1167
1161/* hmac/smac AAI set */ 1168/* hmac/smac AAI set */
1162#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT) 1169#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
@@ -1178,12 +1185,6 @@ struct sec4_sg_entry {
1178#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT) 1185#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT)
1179#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT) 1186#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT)
1180 1187
1181/* RNG4 set */
1182#define OP_ALG_RNG4_SHIFT 4
1183#define OP_ALG_RNG4_MASK (0x1f3 << OP_ALG_RNG4_SHIFT)
1184
1185#define OP_ALG_RNG4_SK (0x100 << OP_ALG_RNG4_SHIFT)
1186
1187#define OP_ALG_AS_SHIFT 2 1188#define OP_ALG_AS_SHIFT 2
1188#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT) 1189#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT)
1189#define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT) 1190#define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT)
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 34c4b9f7fbfa..6d85fcc5bd0a 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -37,13 +37,16 @@ struct caam_jrentry_info {
37 37
38/* Private sub-storage for a single JobR */ 38/* Private sub-storage for a single JobR */
39struct caam_drv_private_jr { 39struct caam_drv_private_jr {
40 struct device *parentdev; /* points back to controller dev */ 40 struct list_head list_node; /* Job Ring device list */
41 struct platform_device *jr_pdev;/* points to platform device for JR */ 41 struct device *dev;
42 int ridx; 42 int ridx;
43 struct caam_job_ring __iomem *rregs; /* JobR's register space */ 43 struct caam_job_ring __iomem *rregs; /* JobR's register space */
44 struct tasklet_struct irqtask; 44 struct tasklet_struct irqtask;
45 int irq; /* One per queue */ 45 int irq; /* One per queue */
46 46
47 /* Number of scatterlist crypt transforms active on the JobR */
48 atomic_t tfm_count ____cacheline_aligned;
49
47 /* Job ring info */ 50 /* Job ring info */
48 int ringsize; /* Size of rings (assume input = output) */ 51 int ringsize; /* Size of rings (assume input = output) */
49 struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */ 52 struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */
@@ -63,7 +66,7 @@ struct caam_drv_private_jr {
63struct caam_drv_private { 66struct caam_drv_private {
64 67
65 struct device *dev; 68 struct device *dev;
66 struct device **jrdev; /* Alloc'ed array per sub-device */ 69 struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
67 struct platform_device *pdev; 70 struct platform_device *pdev;
68 71
69 /* Physical-presence section */ 72 /* Physical-presence section */
@@ -80,12 +83,11 @@ struct caam_drv_private {
80 u8 qi_present; /* Nonzero if QI present in device */ 83 u8 qi_present; /* Nonzero if QI present in device */
81 int secvio_irq; /* Security violation interrupt number */ 84 int secvio_irq; /* Security violation interrupt number */
82 85
83 /* which jr allocated to scatterlist crypto */ 86#define RNG4_MAX_HANDLES 2
84 atomic_t tfm_count ____cacheline_aligned; 87 /* RNG4 block */
85 /* list of registered crypto algorithms (mk generic context handle?) */ 88 u32 rng4_sh_init; /* This bitmap shows which of the State
86 struct list_head alg_list; 89 Handles of the RNG4 block are initialized
87 /* list of registered hash algorithms (mk generic context handle?) */ 90 by this driver */
88 struct list_head hash_list;
89 91
90 /* 92 /*
91 * debugfs entries for developer view into driver/device 93 * debugfs entries for developer view into driver/device
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index bdb786d5a5e5..1d80bd3636c5 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -6,6 +6,7 @@
6 */ 6 */
7 7
8#include <linux/of_irq.h> 8#include <linux/of_irq.h>
9#include <linux/of_address.h>
9 10
10#include "compat.h" 11#include "compat.h"
11#include "regs.h" 12#include "regs.h"
@@ -13,6 +14,113 @@
13#include "desc.h" 14#include "desc.h"
14#include "intern.h" 15#include "intern.h"
15 16
17struct jr_driver_data {
18 /* List of Physical JobR's with the Driver */
19 struct list_head jr_list;
20 spinlock_t jr_alloc_lock; /* jr_list lock */
21} ____cacheline_aligned;
22
23static struct jr_driver_data driver_data;
24
25static int caam_reset_hw_jr(struct device *dev)
26{
27 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
28 unsigned int timeout = 100000;
29
30 /*
31 * mask interrupts since we are going to poll
32 * for reset completion status
33 */
34 setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
35
36 /* initiate flush (required prior to reset) */
37 wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
38 while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
39 JRINT_ERR_HALT_INPROGRESS) && --timeout)
40 cpu_relax();
41
42 if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
43 JRINT_ERR_HALT_COMPLETE || timeout == 0) {
44 dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
45 return -EIO;
46 }
47
48 /* initiate reset */
49 timeout = 100000;
50 wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
51 while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
52 cpu_relax();
53
54 if (timeout == 0) {
55 dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
56 return -EIO;
57 }
58
59 /* unmask interrupts */
60 clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
61
62 return 0;
63}
64
65/*
66 * Shutdown JobR independent of platform property code
67 */
68int caam_jr_shutdown(struct device *dev)
69{
70 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
71 dma_addr_t inpbusaddr, outbusaddr;
72 int ret;
73
74 ret = caam_reset_hw_jr(dev);
75
76 tasklet_kill(&jrp->irqtask);
77
78 /* Release interrupt */
79 free_irq(jrp->irq, dev);
80
81 /* Free rings */
82 inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
83 outbusaddr = rd_reg64(&jrp->rregs->outring_base);
84 dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
85 jrp->inpring, inpbusaddr);
86 dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
87 jrp->outring, outbusaddr);
88 kfree(jrp->entinfo);
89
90 return ret;
91}
92
93static int caam_jr_remove(struct platform_device *pdev)
94{
95 int ret;
96 struct device *jrdev;
97 struct caam_drv_private_jr *jrpriv;
98
99 jrdev = &pdev->dev;
100 jrpriv = dev_get_drvdata(jrdev);
101
102 /*
103 * Return EBUSY if job ring already allocated.
104 */
105 if (atomic_read(&jrpriv->tfm_count)) {
106 dev_err(jrdev, "Device is busy\n");
107 return -EBUSY;
108 }
109
110 /* Remove the node from Physical JobR list maintained by driver */
111 spin_lock(&driver_data.jr_alloc_lock);
112 list_del(&jrpriv->list_node);
113 spin_unlock(&driver_data.jr_alloc_lock);
114
115 /* Release ring */
116 ret = caam_jr_shutdown(jrdev);
117 if (ret)
118 dev_err(jrdev, "Failed to shut down job ring\n");
119 irq_dispose_mapping(jrpriv->irq);
120
121 return ret;
122}
123
16/* Main per-ring interrupt handler */ 124/* Main per-ring interrupt handler */
17static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) 125static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
18{ 126{
@@ -128,6 +236,59 @@ static void caam_jr_dequeue(unsigned long devarg)
128} 236}
129 237
130/** 238/**
239 * caam_jr_alloc() - Alloc a job ring for someone to use as needed.
240 *
241 * returns : pointer to the newly allocated physical
242 * JobR dev can be written to if successful.
243 **/
244struct device *caam_jr_alloc(void)
245{
246 struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
247 struct device *dev = NULL;
248 int min_tfm_cnt = INT_MAX;
249 int tfm_cnt;
250
251 spin_lock(&driver_data.jr_alloc_lock);
252
253 if (list_empty(&driver_data.jr_list)) {
254 spin_unlock(&driver_data.jr_alloc_lock);
255 return ERR_PTR(-ENODEV);
256 }
257
258 list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
259 tfm_cnt = atomic_read(&jrpriv->tfm_count);
260 if (tfm_cnt < min_tfm_cnt) {
261 min_tfm_cnt = tfm_cnt;
262 min_jrpriv = jrpriv;
263 }
264 if (!min_tfm_cnt)
265 break;
266 }
267
268 if (min_jrpriv) {
269 atomic_inc(&min_jrpriv->tfm_count);
270 dev = min_jrpriv->dev;
271 }
272 spin_unlock(&driver_data.jr_alloc_lock);
273
274 return dev;
275}
276EXPORT_SYMBOL(caam_jr_alloc);
277
278/**
279 * caam_jr_free() - Free the Job Ring
280 * @rdev - points to the dev that identifies the Job ring to
281 * be released.
282 **/
283void caam_jr_free(struct device *rdev)
284{
285 struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
286
287 atomic_dec(&jrpriv->tfm_count);
288}
289EXPORT_SYMBOL(caam_jr_free);
290
291/**
131 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK, 292 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
132 * -EBUSY if the queue is full, -EIO if it cannot map the caller's 293 * -EBUSY if the queue is full, -EIO if it cannot map the caller's
133 * descriptor. 294 * descriptor.
@@ -207,46 +368,6 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
207} 368}
208EXPORT_SYMBOL(caam_jr_enqueue); 369EXPORT_SYMBOL(caam_jr_enqueue);
209 370
210static int caam_reset_hw_jr(struct device *dev)
211{
212 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
213 unsigned int timeout = 100000;
214
215 /*
216 * mask interrupts since we are going to poll
217 * for reset completion status
218 */
219 setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
220
221 /* initiate flush (required prior to reset) */
222 wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
223 while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
224 JRINT_ERR_HALT_INPROGRESS) && --timeout)
225 cpu_relax();
226
227 if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
228 JRINT_ERR_HALT_COMPLETE || timeout == 0) {
229 dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
230 return -EIO;
231 }
232
233 /* initiate reset */
234 timeout = 100000;
235 wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
236 while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
237 cpu_relax();
238
239 if (timeout == 0) {
240 dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
241 return -EIO;
242 }
243
244 /* unmask interrupts */
245 clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
246
247 return 0;
248}
249
250/* 371/*
251 * Init JobR independent of platform property detection 372 * Init JobR independent of platform property detection
252 */ 373 */
@@ -262,7 +383,7 @@ static int caam_jr_init(struct device *dev)
262 383
263 /* Connect job ring interrupt handler. */ 384 /* Connect job ring interrupt handler. */
264 error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, 385 error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
265 "caam-jobr", dev); 386 dev_name(dev), dev);
266 if (error) { 387 if (error) {
267 dev_err(dev, "can't connect JobR %d interrupt (%d)\n", 388 dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
268 jrp->ridx, jrp->irq); 389 jrp->ridx, jrp->irq);
@@ -318,86 +439,43 @@ static int caam_jr_init(struct device *dev)
318 return 0; 439 return 0;
319} 440}
320 441
321/*
322 * Shutdown JobR independent of platform property code
323 */
324int caam_jr_shutdown(struct device *dev)
325{
326 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
327 dma_addr_t inpbusaddr, outbusaddr;
328 int ret;
329
330 ret = caam_reset_hw_jr(dev);
331
332 tasklet_kill(&jrp->irqtask);
333
334 /* Release interrupt */
335 free_irq(jrp->irq, dev);
336
337 /* Free rings */
338 inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
339 outbusaddr = rd_reg64(&jrp->rregs->outring_base);
340 dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
341 jrp->inpring, inpbusaddr);
342 dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
343 jrp->outring, outbusaddr);
344 kfree(jrp->entinfo);
345 of_device_unregister(jrp->jr_pdev);
346
347 return ret;
348}
349 442
350/* 443/*
351 * Probe routine for each detected JobR subsystem. It assumes that 444 * Probe routine for each detected JobR subsystem.
352 * property detection was picked up externally.
353 */ 445 */
354int caam_jr_probe(struct platform_device *pdev, struct device_node *np, 446static int caam_jr_probe(struct platform_device *pdev)
355 int ring)
356{ 447{
357 struct device *ctrldev, *jrdev; 448 struct device *jrdev;
358 struct platform_device *jr_pdev; 449 struct device_node *nprop;
359 struct caam_drv_private *ctrlpriv; 450 struct caam_job_ring __iomem *ctrl;
360 struct caam_drv_private_jr *jrpriv; 451 struct caam_drv_private_jr *jrpriv;
361 u32 *jroffset; 452 static int total_jobrs;
362 int error; 453 int error;
363 454
364 ctrldev = &pdev->dev; 455 jrdev = &pdev->dev;
365 ctrlpriv = dev_get_drvdata(ctrldev);
366
367 jrpriv = kmalloc(sizeof(struct caam_drv_private_jr), 456 jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
368 GFP_KERNEL); 457 GFP_KERNEL);
369 if (jrpriv == NULL) { 458 if (!jrpriv)
370 dev_err(ctrldev, "can't alloc private mem for job ring %d\n",
371 ring);
372 return -ENOMEM; 459 return -ENOMEM;
373 }
374 jrpriv->parentdev = ctrldev; /* point back to parent */
375 jrpriv->ridx = ring; /* save ring identity relative to detection */
376 460
377 /* 461 dev_set_drvdata(jrdev, jrpriv);
378 * Derive a pointer to the detected JobRs regs
379 * Driver has already iomapped the entire space, we just
380 * need to add in the offset to this JobR. Don't know if I
381 * like this long-term, but it'll run
382 */
383 jroffset = (u32 *)of_get_property(np, "reg", NULL);
384 jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl
385 + *jroffset);
386 462
387 /* Build a local dev for each detected queue */ 463 /* save ring identity relative to detection */
388 jr_pdev = of_platform_device_create(np, NULL, ctrldev); 464 jrpriv->ridx = total_jobrs++;
389 if (jr_pdev == NULL) { 465
390 kfree(jrpriv); 466 nprop = pdev->dev.of_node;
391 return -EINVAL; 467 /* Get configuration properties from device tree */
468 /* First, get register page */
469 ctrl = of_iomap(nprop, 0);
470 if (!ctrl) {
471 dev_err(jrdev, "of_iomap() failed\n");
472 return -ENOMEM;
392 } 473 }
393 474
394 jrpriv->jr_pdev = jr_pdev; 475 jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
395 jrdev = &jr_pdev->dev;
396 dev_set_drvdata(jrdev, jrpriv);
397 ctrlpriv->jrdev[ring] = jrdev;
398 476
399 if (sizeof(dma_addr_t) == sizeof(u64)) 477 if (sizeof(dma_addr_t) == sizeof(u64))
400 if (of_device_is_compatible(np, "fsl,sec-v5.0-job-ring")) 478 if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
401 dma_set_mask(jrdev, DMA_BIT_MASK(40)); 479 dma_set_mask(jrdev, DMA_BIT_MASK(40));
402 else 480 else
403 dma_set_mask(jrdev, DMA_BIT_MASK(36)); 481 dma_set_mask(jrdev, DMA_BIT_MASK(36));
@@ -405,15 +483,61 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
405 dma_set_mask(jrdev, DMA_BIT_MASK(32)); 483 dma_set_mask(jrdev, DMA_BIT_MASK(32));
406 484
407 /* Identify the interrupt */ 485 /* Identify the interrupt */
408 jrpriv->irq = irq_of_parse_and_map(np, 0); 486 jrpriv->irq = irq_of_parse_and_map(nprop, 0);
409 487
410 /* Now do the platform independent part */ 488 /* Now do the platform independent part */
411 error = caam_jr_init(jrdev); /* now turn on hardware */ 489 error = caam_jr_init(jrdev); /* now turn on hardware */
412 if (error) { 490 if (error) {
413 of_device_unregister(jr_pdev);
414 kfree(jrpriv); 491 kfree(jrpriv);
415 return error; 492 return error;
416 } 493 }
417 494
418 return error; 495 jrpriv->dev = jrdev;
496 spin_lock(&driver_data.jr_alloc_lock);
497 list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
498 spin_unlock(&driver_data.jr_alloc_lock);
499
500 atomic_set(&jrpriv->tfm_count, 0);
501
502 return 0;
503}
504
505static struct of_device_id caam_jr_match[] = {
506 {
507 .compatible = "fsl,sec-v4.0-job-ring",
508 },
509 {
510 .compatible = "fsl,sec4.0-job-ring",
511 },
512 {},
513};
514MODULE_DEVICE_TABLE(of, caam_jr_match);
515
516static struct platform_driver caam_jr_driver = {
517 .driver = {
518 .name = "caam_jr",
519 .owner = THIS_MODULE,
520 .of_match_table = caam_jr_match,
521 },
522 .probe = caam_jr_probe,
523 .remove = caam_jr_remove,
524};
525
526static int __init jr_driver_init(void)
527{
528 spin_lock_init(&driver_data.jr_alloc_lock);
529 INIT_LIST_HEAD(&driver_data.jr_list);
530 return platform_driver_register(&caam_jr_driver);
531}
532
533static void __exit jr_driver_exit(void)
534{
535 platform_driver_unregister(&caam_jr_driver);
419} 536}
537
538module_init(jr_driver_init);
539module_exit(jr_driver_exit);
540
541MODULE_LICENSE("GPL");
542MODULE_DESCRIPTION("FSL CAAM JR request backend");
543MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h
index 9d8741a59037..97113a6d6c58 100644
--- a/drivers/crypto/caam/jr.h
+++ b/drivers/crypto/caam/jr.h
@@ -8,12 +8,11 @@
8#define JR_H 8#define JR_H
9 9
10/* Prototypes for backend-level services exposed to APIs */ 10/* Prototypes for backend-level services exposed to APIs */
11struct device *caam_jr_alloc(void);
12void caam_jr_free(struct device *rdev);
11int caam_jr_enqueue(struct device *dev, u32 *desc, 13int caam_jr_enqueue(struct device *dev, u32 *desc,
12 void (*cbk)(struct device *dev, u32 *desc, u32 status, 14 void (*cbk)(struct device *dev, u32 *desc, u32 status,
13 void *areq), 15 void *areq),
14 void *areq); 16 void *areq);
15 17
16extern int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
17 int ring);
18extern int caam_jr_shutdown(struct device *dev);
19#endif /* JR_H */ 18#endif /* JR_H */
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 4455396918de..d50174f45b21 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -245,7 +245,7 @@ struct rngtst {
245 245
246/* RNG4 TRNG test registers */ 246/* RNG4 TRNG test registers */
247struct rng4tst { 247struct rng4tst {
248#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */ 248#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
249 u32 rtmctl; /* misc. control register */ 249 u32 rtmctl; /* misc. control register */
250 u32 rtscmisc; /* statistical check misc. register */ 250 u32 rtscmisc; /* statistical check misc. register */
251 u32 rtpkrrng; /* poker range register */ 251 u32 rtpkrrng; /* poker range register */
@@ -255,6 +255,8 @@ struct rng4tst {
255 }; 255 };
256#define RTSDCTL_ENT_DLY_SHIFT 16 256#define RTSDCTL_ENT_DLY_SHIFT 16
257#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT) 257#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
258#define RTSDCTL_ENT_DLY_MIN 1200
259#define RTSDCTL_ENT_DLY_MAX 12800
258 u32 rtsdctl; /* seed control register */ 260 u32 rtsdctl; /* seed control register */
259 union { 261 union {
260 u32 rtsblim; /* PRGM=1: sparse bit limit register */ 262 u32 rtsblim; /* PRGM=1: sparse bit limit register */
@@ -266,7 +268,11 @@ struct rng4tst {
266 u32 rtfrqcnt; /* PRGM=0: freq. count register */ 268 u32 rtfrqcnt; /* PRGM=0: freq. count register */
267 }; 269 };
268 u32 rsvd1[40]; 270 u32 rsvd1[40];
271#define RDSTA_SKVT 0x80000000
272#define RDSTA_SKVN 0x40000000
269#define RDSTA_IF0 0x00000001 273#define RDSTA_IF0 0x00000001
274#define RDSTA_IF1 0x00000002
275#define RDSTA_IFMASK (RDSTA_IF1 | RDSTA_IF0)
270 u32 rdsta; 276 u32 rdsta;
271 u32 rsvd2[15]; 277 u32 rsvd2[15];
272}; 278};
@@ -692,6 +698,7 @@ struct caam_deco {
692 u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */ 698 u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */
693 u32 jr_ctl_lo; 699 u32 jr_ctl_lo;
694 u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */ 700 u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */
701#define DECO_OP_STATUS_HI_ERR_MASK 0xF00000FF
695 u32 op_status_hi; /* DxOPSTA - DECO Operation Status */ 702 u32 op_status_hi; /* DxOPSTA - DECO Operation Status */
696 u32 op_status_lo; 703 u32 op_status_lo;
697 u32 rsvd24[2]; 704 u32 rsvd24[2];
@@ -706,12 +713,13 @@ struct caam_deco {
706 u32 rsvd29[48]; 713 u32 rsvd29[48];
707 u32 descbuf[64]; /* DxDESB - Descriptor buffer */ 714 u32 descbuf[64]; /* DxDESB - Descriptor buffer */
708 u32 rscvd30[193]; 715 u32 rscvd30[193];
716#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000
717#define DESC_DBG_DECO_STAT_VALID 0x80000000
718#define DESC_DBG_DECO_STAT_MASK 0x00F00000
709 u32 desc_dbg; /* DxDDR - DECO Debug Register */ 719 u32 desc_dbg; /* DxDDR - DECO Debug Register */
710 u32 rsvd31[126]; 720 u32 rsvd31[126];
711}; 721};
712 722
713/* DECO DBG Register Valid Bit*/
714#define DECO_DBG_VALID 0x80000000
715#define DECO_JQCR_WHL 0x20000000 723#define DECO_JQCR_WHL 0x20000000
716#define DECO_JQCR_FOUR 0x10000000 724#define DECO_JQCR_FOUR 0x10000000
717 725
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index e0037c8ee243..b12ff85f4241 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -117,6 +117,21 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
117 return nents; 117 return nents;
118} 118}
119 119
120/* Map SG page in kernel virtual address space and copy */
121static inline void sg_map_copy(u8 *dest, struct scatterlist *sg,
122 int len, int offset)
123{
124 u8 *mapped_addr;
125
126 /*
127 * Page here can be user-space pinned using get_user_pages
128 * Same must be kmapped before use and kunmapped subsequently
129 */
130 mapped_addr = kmap_atomic(sg_page(sg));
131 memcpy(dest, mapped_addr + offset, len);
132 kunmap_atomic(mapped_addr);
133}
134
120/* Copy from len bytes of sg to dest, starting from beginning */ 135/* Copy from len bytes of sg to dest, starting from beginning */
121static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len) 136static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
122{ 137{
@@ -124,15 +139,15 @@ static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
124 int cpy_index = 0, next_cpy_index = current_sg->length; 139 int cpy_index = 0, next_cpy_index = current_sg->length;
125 140
126 while (next_cpy_index < len) { 141 while (next_cpy_index < len) {
127 memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg), 142 sg_map_copy(dest + cpy_index, current_sg, current_sg->length,
128 current_sg->length); 143 current_sg->offset);
129 current_sg = scatterwalk_sg_next(current_sg); 144 current_sg = scatterwalk_sg_next(current_sg);
130 cpy_index = next_cpy_index; 145 cpy_index = next_cpy_index;
131 next_cpy_index += current_sg->length; 146 next_cpy_index += current_sg->length;
132 } 147 }
133 if (cpy_index < len) 148 if (cpy_index < len)
134 memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg), 149 sg_map_copy(dest + cpy_index, current_sg, len-cpy_index,
135 len - cpy_index); 150 current_sg->offset);
136} 151}
137 152
138/* Copy sg data, from to_skip to end, to dest */ 153/* Copy sg data, from to_skip to end, to dest */
@@ -140,7 +155,7 @@ static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
140 int to_skip, unsigned int end) 155 int to_skip, unsigned int end)
141{ 156{
142 struct scatterlist *current_sg = sg; 157 struct scatterlist *current_sg = sg;
143 int sg_index, cpy_index; 158 int sg_index, cpy_index, offset;
144 159
145 sg_index = current_sg->length; 160 sg_index = current_sg->length;
146 while (sg_index <= to_skip) { 161 while (sg_index <= to_skip) {
@@ -148,9 +163,10 @@ static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
148 sg_index += current_sg->length; 163 sg_index += current_sg->length;
149 } 164 }
150 cpy_index = sg_index - to_skip; 165 cpy_index = sg_index - to_skip;
151 memcpy(dest, (u8 *) sg_virt(current_sg) + 166 offset = current_sg->offset + current_sg->length - cpy_index;
152 current_sg->length - cpy_index, cpy_index); 167 sg_map_copy(dest, current_sg, cpy_index, offset);
153 current_sg = scatterwalk_sg_next(current_sg); 168 if (end - sg_index) {
154 if (end - sg_index) 169 current_sg = scatterwalk_sg_next(current_sg);
155 sg_copy(dest + cpy_index, current_sg, end - sg_index); 170 sg_copy(dest + cpy_index, current_sg, end - sg_index);
171 }
156} 172}
diff --git a/drivers/crypto/dcp.c b/drivers/crypto/dcp.c
index a8a7dd4b0d25..247ab8048f5b 100644
--- a/drivers/crypto/dcp.c
+++ b/drivers/crypto/dcp.c
@@ -733,12 +733,9 @@ static int dcp_probe(struct platform_device *pdev)
733 platform_set_drvdata(pdev, dev); 733 platform_set_drvdata(pdev, dev);
734 734
735 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 735 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
736 if (!r) { 736 dev->dcp_regs_base = devm_ioremap_resource(&pdev->dev, r);
737 dev_err(&pdev->dev, "failed to get IORESOURCE_MEM\n"); 737 if (IS_ERR(dev->dcp_regs_base))
738 return -ENXIO; 738 return PTR_ERR(dev->dcp_regs_base);
739 }
740 dev->dcp_regs_base = devm_ioremap(&pdev->dev, r->start,
741 resource_size(r));
742 739
743 dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL); 740 dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL);
744 udelay(10); 741 udelay(10);
@@ -762,7 +759,8 @@ static int dcp_probe(struct platform_device *pdev)
762 return -EIO; 759 return -EIO;
763 } 760 }
764 dev->dcp_vmi_irq = r->start; 761 dev->dcp_vmi_irq = r->start;
765 ret = request_irq(dev->dcp_vmi_irq, dcp_vmi_irq, 0, "dcp", dev); 762 ret = devm_request_irq(&pdev->dev, dev->dcp_vmi_irq, dcp_vmi_irq, 0,
763 "dcp", dev);
766 if (ret != 0) { 764 if (ret != 0) {
767 dev_err(&pdev->dev, "can't request_irq (0)\n"); 765 dev_err(&pdev->dev, "can't request_irq (0)\n");
768 return -EIO; 766 return -EIO;
@@ -771,15 +769,14 @@ static int dcp_probe(struct platform_device *pdev)
771 r = platform_get_resource(pdev, IORESOURCE_IRQ, 1); 769 r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
772 if (!r) { 770 if (!r) {
773 dev_err(&pdev->dev, "can't get IRQ resource (1)\n"); 771 dev_err(&pdev->dev, "can't get IRQ resource (1)\n");
774 ret = -EIO; 772 return -EIO;
775 goto err_free_irq0;
776 } 773 }
777 dev->dcp_irq = r->start; 774 dev->dcp_irq = r->start;
778 ret = request_irq(dev->dcp_irq, dcp_irq, 0, "dcp", dev); 775 ret = devm_request_irq(&pdev->dev, dev->dcp_irq, dcp_irq, 0, "dcp",
776 dev);
779 if (ret != 0) { 777 if (ret != 0) {
780 dev_err(&pdev->dev, "can't request_irq (1)\n"); 778 dev_err(&pdev->dev, "can't request_irq (1)\n");
781 ret = -EIO; 779 return -EIO;
782 goto err_free_irq0;
783 } 780 }
784 781
785 dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev, 782 dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev,
@@ -788,8 +785,7 @@ static int dcp_probe(struct platform_device *pdev)
788 GFP_KERNEL); 785 GFP_KERNEL);
789 if (!dev->hw_pkg[0]) { 786 if (!dev->hw_pkg[0]) {
790 dev_err(&pdev->dev, "Could not allocate hw descriptors\n"); 787 dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
791 ret = -ENOMEM; 788 return -ENOMEM;
792 goto err_free_irq1;
793 } 789 }
794 790
795 for (i = 1; i < DCP_MAX_PKG; i++) { 791 for (i = 1; i < DCP_MAX_PKG; i++) {
@@ -848,16 +844,14 @@ err_unregister:
848 for (j = 0; j < i; j++) 844 for (j = 0; j < i; j++)
849 crypto_unregister_alg(&algs[j]); 845 crypto_unregister_alg(&algs[j]);
850err_free_key_iv: 846err_free_key_iv:
847 tasklet_kill(&dev->done_task);
848 tasklet_kill(&dev->queue_task);
851 dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base, 849 dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
852 dev->payload_base_dma); 850 dev->payload_base_dma);
853err_free_hw_packet: 851err_free_hw_packet:
854 dma_free_coherent(&pdev->dev, DCP_MAX_PKG * 852 dma_free_coherent(&pdev->dev, DCP_MAX_PKG *
855 sizeof(struct dcp_hw_packet), dev->hw_pkg[0], 853 sizeof(struct dcp_hw_packet), dev->hw_pkg[0],
856 dev->hw_phys_pkg); 854 dev->hw_phys_pkg);
857err_free_irq1:
858 free_irq(dev->dcp_irq, dev);
859err_free_irq0:
860 free_irq(dev->dcp_vmi_irq, dev);
861 855
862 return ret; 856 return ret;
863} 857}
@@ -868,23 +862,20 @@ static int dcp_remove(struct platform_device *pdev)
868 int j; 862 int j;
869 dev = platform_get_drvdata(pdev); 863 dev = platform_get_drvdata(pdev);
870 864
871 dma_free_coherent(&pdev->dev, 865 misc_deregister(&dev->dcp_bootstream_misc);
872 DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
873 dev->hw_pkg[0], dev->hw_phys_pkg);
874
875 dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
876 dev->payload_base_dma);
877 866
878 free_irq(dev->dcp_irq, dev); 867 for (j = 0; j < ARRAY_SIZE(algs); j++)
879 free_irq(dev->dcp_vmi_irq, dev); 868 crypto_unregister_alg(&algs[j]);
880 869
881 tasklet_kill(&dev->done_task); 870 tasklet_kill(&dev->done_task);
882 tasklet_kill(&dev->queue_task); 871 tasklet_kill(&dev->queue_task);
883 872
884 for (j = 0; j < ARRAY_SIZE(algs); j++) 873 dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
885 crypto_unregister_alg(&algs[j]); 874 dev->payload_base_dma);
886 875
887 misc_deregister(&dev->dcp_bootstream_misc); 876 dma_free_coherent(&pdev->dev,
877 DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
878 dev->hw_pkg[0], dev->hw_phys_pkg);
888 879
889 return 0; 880 return 0;
890} 881}
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 214357e12dc0..9dd6e01eac33 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1149,32 +1149,24 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
1149 unsigned int keylen) 1149 unsigned int keylen)
1150{ 1150{
1151 struct ixp_ctx *ctx = crypto_aead_ctx(tfm); 1151 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1152 struct rtattr *rta = (struct rtattr *)key; 1152 struct crypto_authenc_keys keys;
1153 struct crypto_authenc_key_param *param;
1154 1153
1155 if (!RTA_OK(rta, keylen)) 1154 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1156 goto badkey;
1157 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
1158 goto badkey;
1159 if (RTA_PAYLOAD(rta) < sizeof(*param))
1160 goto badkey; 1155 goto badkey;
1161 1156
1162 param = RTA_DATA(rta); 1157 if (keys.authkeylen > sizeof(ctx->authkey))
1163 ctx->enckey_len = be32_to_cpu(param->enckeylen); 1158 goto badkey;
1164
1165 key += RTA_ALIGN(rta->rta_len);
1166 keylen -= RTA_ALIGN(rta->rta_len);
1167 1159
1168 if (keylen < ctx->enckey_len) 1160 if (keys.enckeylen > sizeof(ctx->enckey))
1169 goto badkey; 1161 goto badkey;
1170 1162
1171 ctx->authkey_len = keylen - ctx->enckey_len; 1163 memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
1172 memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len); 1164 memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
1173 memcpy(ctx->authkey, key, ctx->authkey_len); 1165 ctx->authkey_len = keys.authkeylen;
1166 ctx->enckey_len = keys.enckeylen;
1174 1167
1175 return aead_setup(tfm, crypto_aead_authsize(tfm)); 1168 return aead_setup(tfm, crypto_aead_authsize(tfm));
1176badkey: 1169badkey:
1177 ctx->enckey_len = 0;
1178 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 1170 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1179 return -EINVAL; 1171 return -EINVAL;
1180} 1172}
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 3374a3ebe4c7..8d1e6f8e9e9c 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -907,7 +907,7 @@ static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
907 return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE); 907 return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
908} 908}
909 909
910irqreturn_t crypto_int(int irq, void *priv) 910static irqreturn_t crypto_int(int irq, void *priv)
911{ 911{
912 u32 val; 912 u32 val;
913 913
@@ -928,7 +928,7 @@ irqreturn_t crypto_int(int irq, void *priv)
928 return IRQ_HANDLED; 928 return IRQ_HANDLED;
929} 929}
930 930
931struct crypto_alg mv_aes_alg_ecb = { 931static struct crypto_alg mv_aes_alg_ecb = {
932 .cra_name = "ecb(aes)", 932 .cra_name = "ecb(aes)",
933 .cra_driver_name = "mv-ecb-aes", 933 .cra_driver_name = "mv-ecb-aes",
934 .cra_priority = 300, 934 .cra_priority = 300,
@@ -951,7 +951,7 @@ struct crypto_alg mv_aes_alg_ecb = {
951 }, 951 },
952}; 952};
953 953
954struct crypto_alg mv_aes_alg_cbc = { 954static struct crypto_alg mv_aes_alg_cbc = {
955 .cra_name = "cbc(aes)", 955 .cra_name = "cbc(aes)",
956 .cra_driver_name = "mv-cbc-aes", 956 .cra_driver_name = "mv-cbc-aes",
957 .cra_priority = 300, 957 .cra_priority = 300,
@@ -975,7 +975,7 @@ struct crypto_alg mv_aes_alg_cbc = {
975 }, 975 },
976}; 976};
977 977
978struct ahash_alg mv_sha1_alg = { 978static struct ahash_alg mv_sha1_alg = {
979 .init = mv_hash_init, 979 .init = mv_hash_init,
980 .update = mv_hash_update, 980 .update = mv_hash_update,
981 .final = mv_hash_final, 981 .final = mv_hash_final,
@@ -999,7 +999,7 @@ struct ahash_alg mv_sha1_alg = {
999 } 999 }
1000}; 1000};
1001 1001
1002struct ahash_alg mv_hmac_sha1_alg = { 1002static struct ahash_alg mv_hmac_sha1_alg = {
1003 .init = mv_hash_init, 1003 .init = mv_hash_init,
1004 .update = mv_hash_update, 1004 .update = mv_hash_update,
1005 .final = mv_hash_final, 1005 .final = mv_hash_final,
@@ -1084,7 +1084,7 @@ static int mv_probe(struct platform_device *pdev)
1084 goto err_unmap_sram; 1084 goto err_unmap_sram;
1085 } 1085 }
1086 1086
1087 ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev), 1087 ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev),
1088 cp); 1088 cp);
1089 if (ret) 1089 if (ret)
1090 goto err_thread; 1090 goto err_thread;
@@ -1187,7 +1187,7 @@ static struct platform_driver marvell_crypto = {
1187 .driver = { 1187 .driver = {
1188 .owner = THIS_MODULE, 1188 .owner = THIS_MODULE,
1189 .name = "mv_crypto", 1189 .name = "mv_crypto",
1190 .of_match_table = of_match_ptr(mv_cesa_of_match_table), 1190 .of_match_table = mv_cesa_of_match_table,
1191 }, 1191 },
1192}; 1192};
1193MODULE_ALIAS("platform:mv_crypto"); 1193MODULE_ALIAS("platform:mv_crypto");
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index ce791c2f81f7..a9ccbf14096e 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -275,7 +275,7 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
275 if (dd->flags & FLAGS_CBC) 275 if (dd->flags & FLAGS_CBC)
276 val |= AES_REG_CTRL_CBC; 276 val |= AES_REG_CTRL_CBC;
277 if (dd->flags & FLAGS_CTR) { 277 if (dd->flags & FLAGS_CTR) {
278 val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_32; 278 val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
279 mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK; 279 mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
280 } 280 }
281 if (dd->flags & FLAGS_ENCRYPT) 281 if (dd->flags & FLAGS_ENCRYPT)
@@ -554,7 +554,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
554 return err; 554 return err;
555} 555}
556 556
557int omap_aes_check_aligned(struct scatterlist *sg) 557static int omap_aes_check_aligned(struct scatterlist *sg)
558{ 558{
559 while (sg) { 559 while (sg) {
560 if (!IS_ALIGNED(sg->offset, 4)) 560 if (!IS_ALIGNED(sg->offset, 4))
@@ -566,7 +566,7 @@ int omap_aes_check_aligned(struct scatterlist *sg)
566 return 0; 566 return 0;
567} 567}
568 568
569int omap_aes_copy_sgs(struct omap_aes_dev *dd) 569static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
570{ 570{
571 void *buf_in, *buf_out; 571 void *buf_in, *buf_out;
572 int pages; 572 int pages;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index e28104b4aab0..e45aaaf0db30 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2033,3 +2033,4 @@ module_platform_driver(omap_sham_driver);
2033MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support."); 2033MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
2034MODULE_LICENSE("GPL v2"); 2034MODULE_LICENSE("GPL v2");
2035MODULE_AUTHOR("Dmitry Kasatkin"); 2035MODULE_AUTHOR("Dmitry Kasatkin");
2036MODULE_ALIAS("platform:omap-sham");
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 888f7f4a6d3f..a6175ba6d238 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -495,45 +495,29 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
495{ 495{
496 struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); 496 struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
497 struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg); 497 struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
498 struct rtattr *rta = (void *)key; 498 struct crypto_authenc_keys keys;
499 struct crypto_authenc_key_param *param;
500 unsigned int authkeylen, enckeylen;
501 int err = -EINVAL; 499 int err = -EINVAL;
502 500
503 if (!RTA_OK(rta, keylen)) 501 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
504 goto badkey; 502 goto badkey;
505 503
506 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 504 if (keys.enckeylen > AES_MAX_KEY_SIZE)
507 goto badkey; 505 goto badkey;
508 506
509 if (RTA_PAYLOAD(rta) < sizeof(*param)) 507 if (keys.authkeylen > sizeof(ctx->hash_ctx))
510 goto badkey;
511
512 param = RTA_DATA(rta);
513 enckeylen = be32_to_cpu(param->enckeylen);
514
515 key += RTA_ALIGN(rta->rta_len);
516 keylen -= RTA_ALIGN(rta->rta_len);
517
518 if (keylen < enckeylen)
519 goto badkey;
520
521 authkeylen = keylen - enckeylen;
522
523 if (enckeylen > AES_MAX_KEY_SIZE)
524 goto badkey; 508 goto badkey;
525 509
526 if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) == 510 if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
527 SPA_CTRL_CIPH_ALG_AES) 511 SPA_CTRL_CIPH_ALG_AES)
528 err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen); 512 err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen);
529 else 513 else
530 err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen); 514 err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen);
531 515
532 if (err) 516 if (err)
533 goto badkey; 517 goto badkey;
534 518
535 memcpy(ctx->hash_ctx, key, authkeylen); 519 memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
536 ctx->hash_key_len = authkeylen; 520 ctx->hash_key_len = keys.authkeylen;
537 521
538 return 0; 522 return 0;
539 523
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index d7bb8bac36e9..785a9ded7bdf 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1058,7 +1058,7 @@ static struct platform_driver sahara_driver = {
1058 .driver = { 1058 .driver = {
1059 .name = SAHARA_NAME, 1059 .name = SAHARA_NAME,
1060 .owner = THIS_MODULE, 1060 .owner = THIS_MODULE,
1061 .of_match_table = of_match_ptr(sahara_dt_ids), 1061 .of_match_table = sahara_dt_ids,
1062 }, 1062 },
1063 .id_table = sahara_platform_ids, 1063 .id_table = sahara_platform_ids,
1064}; 1064};
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 6cd0e6038583..b44f4ddc565c 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -673,39 +673,20 @@ static int aead_setkey(struct crypto_aead *authenc,
673 const u8 *key, unsigned int keylen) 673 const u8 *key, unsigned int keylen)
674{ 674{
675 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 675 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
676 struct rtattr *rta = (void *)key; 676 struct crypto_authenc_keys keys;
677 struct crypto_authenc_key_param *param;
678 unsigned int authkeylen;
679 unsigned int enckeylen;
680
681 if (!RTA_OK(rta, keylen))
682 goto badkey;
683 677
684 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 678 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
685 goto badkey; 679 goto badkey;
686 680
687 if (RTA_PAYLOAD(rta) < sizeof(*param)) 681 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
688 goto badkey; 682 goto badkey;
689 683
690 param = RTA_DATA(rta); 684 memcpy(ctx->key, keys.authkey, keys.authkeylen);
691 enckeylen = be32_to_cpu(param->enckeylen); 685 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
692
693 key += RTA_ALIGN(rta->rta_len);
694 keylen -= RTA_ALIGN(rta->rta_len);
695 686
696 if (keylen < enckeylen) 687 ctx->keylen = keys.authkeylen + keys.enckeylen;
697 goto badkey; 688 ctx->enckeylen = keys.enckeylen;
698 689 ctx->authkeylen = keys.authkeylen;
699 authkeylen = keylen - enckeylen;
700
701 if (keylen > TALITOS_MAX_KEY_SIZE)
702 goto badkey;
703
704 memcpy(&ctx->key, key, keylen);
705
706 ctx->keylen = keylen;
707 ctx->enckeylen = enckeylen;
708 ctx->authkeylen = authkeylen;
709 690
710 return 0; 691 return 0;
711 692
@@ -809,7 +790,7 @@ static void ipsec_esp_unmap(struct device *dev,
809 790
810 if (edesc->assoc_chained) 791 if (edesc->assoc_chained)
811 talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE); 792 talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
812 else 793 else if (areq->assoclen)
813 /* assoc_nents counts also for IV in non-contiguous cases */ 794 /* assoc_nents counts also for IV in non-contiguous cases */
814 dma_unmap_sg(dev, areq->assoc, 795 dma_unmap_sg(dev, areq->assoc,
815 edesc->assoc_nents ? edesc->assoc_nents - 1 : 1, 796 edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
@@ -992,7 +973,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
992 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 973 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
993 edesc->dma_len, DMA_BIDIRECTIONAL); 974 edesc->dma_len, DMA_BIDIRECTIONAL);
994 } else { 975 } else {
995 to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->assoc)); 976 if (areq->assoclen)
977 to_talitos_ptr(&desc->ptr[1],
978 sg_dma_address(areq->assoc));
979 else
980 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
996 desc->ptr[1].j_extent = 0; 981 desc->ptr[1].j_extent = 0;
997 } 982 }
998 983
@@ -1127,7 +1112,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1127 unsigned int authsize, 1112 unsigned int authsize,
1128 unsigned int ivsize, 1113 unsigned int ivsize,
1129 int icv_stashing, 1114 int icv_stashing,
1130 u32 cryptoflags) 1115 u32 cryptoflags,
1116 bool encrypt)
1131{ 1117{
1132 struct talitos_edesc *edesc; 1118 struct talitos_edesc *edesc;
1133 int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len; 1119 int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
@@ -1141,10 +1127,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1141 return ERR_PTR(-EINVAL); 1127 return ERR_PTR(-EINVAL);
1142 } 1128 }
1143 1129
1144 if (iv) 1130 if (ivsize)
1145 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); 1131 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1146 1132
1147 if (assoc) { 1133 if (assoclen) {
1148 /* 1134 /*
1149 * Currently it is assumed that iv is provided whenever assoc 1135 * Currently it is assumed that iv is provided whenever assoc
1150 * is. 1136 * is.
@@ -1160,19 +1146,17 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1160 assoc_nents = assoc_nents ? assoc_nents + 1 : 2; 1146 assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
1161 } 1147 }
1162 1148
1163 src_nents = sg_count(src, cryptlen + authsize, &src_chained); 1149 if (!dst || dst == src) {
1164 src_nents = (src_nents == 1) ? 0 : src_nents; 1150 src_nents = sg_count(src, cryptlen + authsize, &src_chained);
1165 1151 src_nents = (src_nents == 1) ? 0 : src_nents;
1166 if (!dst) { 1152 dst_nents = dst ? src_nents : 0;
1167 dst_nents = 0; 1153 } else { /* dst && dst != src*/
1168 } else { 1154 src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
1169 if (dst == src) { 1155 &src_chained);
1170 dst_nents = src_nents; 1156 src_nents = (src_nents == 1) ? 0 : src_nents;
1171 } else { 1157 dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
1172 dst_nents = sg_count(dst, cryptlen + authsize, 1158 &dst_chained);
1173 &dst_chained); 1159 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1174 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1175 }
1176 } 1160 }
1177 1161
1178 /* 1162 /*
@@ -1192,9 +1176,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1192 1176
1193 edesc = kmalloc(alloc_len, GFP_DMA | flags); 1177 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1194 if (!edesc) { 1178 if (!edesc) {
1195 talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE); 1179 if (assoc_chained)
1180 talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
1181 else if (assoclen)
1182 dma_unmap_sg(dev, assoc,
1183 assoc_nents ? assoc_nents - 1 : 1,
1184 DMA_TO_DEVICE);
1185
1196 if (iv_dma) 1186 if (iv_dma)
1197 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); 1187 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1188
1198 dev_err(dev, "could not allocate edescriptor\n"); 1189 dev_err(dev, "could not allocate edescriptor\n");
1199 return ERR_PTR(-ENOMEM); 1190 return ERR_PTR(-ENOMEM);
1200 } 1191 }
@@ -1216,7 +1207,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1216} 1207}
1217 1208
1218static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, 1209static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1219 int icv_stashing) 1210 int icv_stashing, bool encrypt)
1220{ 1211{
1221 struct crypto_aead *authenc = crypto_aead_reqtfm(areq); 1212 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1222 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 1213 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
@@ -1225,7 +1216,7 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1225 return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst, 1216 return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
1226 iv, areq->assoclen, areq->cryptlen, 1217 iv, areq->assoclen, areq->cryptlen,
1227 ctx->authsize, ivsize, icv_stashing, 1218 ctx->authsize, ivsize, icv_stashing,
1228 areq->base.flags); 1219 areq->base.flags, encrypt);
1229} 1220}
1230 1221
1231static int aead_encrypt(struct aead_request *req) 1222static int aead_encrypt(struct aead_request *req)
@@ -1235,7 +1226,7 @@ static int aead_encrypt(struct aead_request *req)
1235 struct talitos_edesc *edesc; 1226 struct talitos_edesc *edesc;
1236 1227
1237 /* allocate extended descriptor */ 1228 /* allocate extended descriptor */
1238 edesc = aead_edesc_alloc(req, req->iv, 0); 1229 edesc = aead_edesc_alloc(req, req->iv, 0, true);
1239 if (IS_ERR(edesc)) 1230 if (IS_ERR(edesc))
1240 return PTR_ERR(edesc); 1231 return PTR_ERR(edesc);
1241 1232
@@ -1258,7 +1249,7 @@ static int aead_decrypt(struct aead_request *req)
1258 req->cryptlen -= authsize; 1249 req->cryptlen -= authsize;
1259 1250
1260 /* allocate extended descriptor */ 1251 /* allocate extended descriptor */
1261 edesc = aead_edesc_alloc(req, req->iv, 1); 1252 edesc = aead_edesc_alloc(req, req->iv, 1, false);
1262 if (IS_ERR(edesc)) 1253 if (IS_ERR(edesc))
1263 return PTR_ERR(edesc); 1254 return PTR_ERR(edesc);
1264 1255
@@ -1304,7 +1295,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *req)
1304 struct talitos_edesc *edesc; 1295 struct talitos_edesc *edesc;
1305 1296
1306 /* allocate extended descriptor */ 1297 /* allocate extended descriptor */
1307 edesc = aead_edesc_alloc(areq, req->giv, 0); 1298 edesc = aead_edesc_alloc(areq, req->giv, 0, true);
1308 if (IS_ERR(edesc)) 1299 if (IS_ERR(edesc))
1309 return PTR_ERR(edesc); 1300 return PTR_ERR(edesc);
1310 1301
@@ -1460,7 +1451,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
1460} 1451}
1461 1452
1462static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request * 1453static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1463 areq) 1454 areq, bool encrypt)
1464{ 1455{
1465 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); 1456 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1466 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); 1457 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
@@ -1468,7 +1459,7 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1468 1459
1469 return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst, 1460 return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
1470 areq->info, 0, areq->nbytes, 0, ivsize, 0, 1461 areq->info, 0, areq->nbytes, 0, ivsize, 0,
1471 areq->base.flags); 1462 areq->base.flags, encrypt);
1472} 1463}
1473 1464
1474static int ablkcipher_encrypt(struct ablkcipher_request *areq) 1465static int ablkcipher_encrypt(struct ablkcipher_request *areq)
@@ -1478,7 +1469,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1478 struct talitos_edesc *edesc; 1469 struct talitos_edesc *edesc;
1479 1470
1480 /* allocate extended descriptor */ 1471 /* allocate extended descriptor */
1481 edesc = ablkcipher_edesc_alloc(areq); 1472 edesc = ablkcipher_edesc_alloc(areq, true);
1482 if (IS_ERR(edesc)) 1473 if (IS_ERR(edesc))
1483 return PTR_ERR(edesc); 1474 return PTR_ERR(edesc);
1484 1475
@@ -1495,7 +1486,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1495 struct talitos_edesc *edesc; 1486 struct talitos_edesc *edesc;
1496 1487
1497 /* allocate extended descriptor */ 1488 /* allocate extended descriptor */
1498 edesc = ablkcipher_edesc_alloc(areq); 1489 edesc = ablkcipher_edesc_alloc(areq, false);
1499 if (IS_ERR(edesc)) 1490 if (IS_ERR(edesc))
1500 return PTR_ERR(edesc); 1491 return PTR_ERR(edesc);
1501 1492
@@ -1647,7 +1638,7 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1647 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1638 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1648 1639
1649 return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0, 1640 return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
1650 nbytes, 0, 0, 0, areq->base.flags); 1641 nbytes, 0, 0, 0, areq->base.flags, false);
1651} 1642}
1652 1643
1653static int ahash_init(struct ahash_request *areq) 1644static int ahash_init(struct ahash_request *areq)
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
index fa05e3c329bd..060eecc5dbc3 100644
--- a/drivers/crypto/tegra-aes.c
+++ b/drivers/crypto/tegra-aes.c
@@ -27,6 +27,8 @@
27 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 27 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
28 */ 28 */
29 29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
30#include <linux/module.h> 32#include <linux/module.h>
31#include <linux/init.h> 33#include <linux/init.h>
32#include <linux/errno.h> 34#include <linux/errno.h>
@@ -199,8 +201,6 @@ static void aes_workqueue_handler(struct work_struct *work);
199static DECLARE_WORK(aes_work, aes_workqueue_handler); 201static DECLARE_WORK(aes_work, aes_workqueue_handler);
200static struct workqueue_struct *aes_wq; 202static struct workqueue_struct *aes_wq;
201 203
202extern unsigned long long tegra_chip_uid(void);
203
204static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset) 204static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset)
205{ 205{
206 return readl(dd->io_base + offset); 206 return readl(dd->io_base + offset);
@@ -713,13 +713,12 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
713 struct tegra_aes_dev *dd = aes_dev; 713 struct tegra_aes_dev *dd = aes_dev;
714 struct tegra_aes_ctx *ctx = &rng_ctx; 714 struct tegra_aes_ctx *ctx = &rng_ctx;
715 struct tegra_aes_slot *key_slot; 715 struct tegra_aes_slot *key_slot;
716 struct timespec ts;
717 int ret = 0; 716 int ret = 0;
718 u64 nsec, tmp[2]; 717 u8 tmp[16]; /* 16 bytes = 128 bits of entropy */
719 u8 *dt; 718 u8 *dt;
720 719
721 if (!ctx || !dd) { 720 if (!ctx || !dd) {
722 dev_err(dd->dev, "ctx=0x%x, dd=0x%x\n", 721 pr_err("ctx=0x%x, dd=0x%x\n",
723 (unsigned int)ctx, (unsigned int)dd); 722 (unsigned int)ctx, (unsigned int)dd);
724 return -EINVAL; 723 return -EINVAL;
725 } 724 }
@@ -778,14 +777,8 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
778 if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) { 777 if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
779 dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128; 778 dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128;
780 } else { 779 } else {
781 getnstimeofday(&ts); 780 get_random_bytes(tmp, sizeof(tmp));
782 nsec = timespec_to_ns(&ts); 781 dt = tmp;
783 do_div(nsec, 1000);
784 nsec ^= dd->ctr << 56;
785 dd->ctr++;
786 tmp[0] = nsec;
787 tmp[1] = tegra_chip_uid();
788 dt = (u8 *)tmp;
789 } 782 }
790 memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ); 783 memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);
791 784
@@ -804,7 +797,7 @@ static int tegra_aes_cra_init(struct crypto_tfm *tfm)
804 return 0; 797 return 0;
805} 798}
806 799
807void tegra_aes_cra_exit(struct crypto_tfm *tfm) 800static void tegra_aes_cra_exit(struct crypto_tfm *tfm)
808{ 801{
809 struct tegra_aes_ctx *ctx = 802 struct tegra_aes_ctx *ctx =
810 crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm); 803 crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm);
@@ -924,7 +917,7 @@ static int tegra_aes_probe(struct platform_device *pdev)
924 } 917 }
925 918
926 /* Initialize the vde clock */ 919 /* Initialize the vde clock */
927 dd->aes_clk = clk_get(dev, "vde"); 920 dd->aes_clk = devm_clk_get(dev, "vde");
928 if (IS_ERR(dd->aes_clk)) { 921 if (IS_ERR(dd->aes_clk)) {
929 dev_err(dev, "iclock intialization failed.\n"); 922 dev_err(dev, "iclock intialization failed.\n");
930 err = -ENODEV; 923 err = -ENODEV;
@@ -1033,8 +1026,6 @@ out:
1033 if (dd->buf_out) 1026 if (dd->buf_out)
1034 dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, 1027 dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
1035 dd->buf_out, dd->dma_buf_out); 1028 dd->buf_out, dd->dma_buf_out);
1036 if (!IS_ERR(dd->aes_clk))
1037 clk_put(dd->aes_clk);
1038 if (aes_wq) 1029 if (aes_wq)
1039 destroy_workqueue(aes_wq); 1030 destroy_workqueue(aes_wq);
1040 spin_lock(&list_lock); 1031 spin_lock(&list_lock);
@@ -1068,7 +1059,6 @@ static int tegra_aes_remove(struct platform_device *pdev)
1068 dd->buf_in, dd->dma_buf_in); 1059 dd->buf_in, dd->dma_buf_in);
1069 dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, 1060 dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
1070 dd->buf_out, dd->dma_buf_out); 1061 dd->buf_out, dd->dma_buf_out);
1071 clk_put(dd->aes_clk);
1072 aes_dev = NULL; 1062 aes_dev = NULL;
1073 1063
1074 return 0; 1064 return 0;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index dd2874ec1927..446687cc2334 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -89,14 +89,15 @@ config AT_HDMAC
89 Support the Atmel AHB DMA controller. 89 Support the Atmel AHB DMA controller.
90 90
91config FSL_DMA 91config FSL_DMA
92 tristate "Freescale Elo and Elo Plus DMA support" 92 tristate "Freescale Elo series DMA support"
93 depends on FSL_SOC 93 depends on FSL_SOC
94 select DMA_ENGINE 94 select DMA_ENGINE
95 select ASYNC_TX_ENABLE_CHANNEL_SWITCH 95 select ASYNC_TX_ENABLE_CHANNEL_SWITCH
96 ---help--- 96 ---help---
97 Enable support for the Freescale Elo and Elo Plus DMA controllers. 97 Enable support for the Freescale Elo series DMA controllers.
98 The Elo is the DMA controller on some 82xx and 83xx parts, and the 98 The Elo is the DMA controller on some mpc82xx and mpc83xx parts, the
99 Elo Plus is the DMA controller on 85xx and 86xx parts. 99 EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on
100 some Txxx and Bxxx parts.
100 101
101config MPC512X_DMA 102config MPC512X_DMA
102 tristate "Freescale MPC512x built-in DMA engine support" 103 tristate "Freescale MPC512x built-in DMA engine support"
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index e51a9832ef0d..ec4ee5c1fe9d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1164,42 +1164,12 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
1164 kfree(txd); 1164 kfree(txd);
1165} 1165}
1166 1166
1167static void pl08x_unmap_buffers(struct pl08x_txd *txd)
1168{
1169 struct device *dev = txd->vd.tx.chan->device->dev;
1170 struct pl08x_sg *dsg;
1171
1172 if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
1173 if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
1174 list_for_each_entry(dsg, &txd->dsg_list, node)
1175 dma_unmap_single(dev, dsg->src_addr, dsg->len,
1176 DMA_TO_DEVICE);
1177 else {
1178 list_for_each_entry(dsg, &txd->dsg_list, node)
1179 dma_unmap_page(dev, dsg->src_addr, dsg->len,
1180 DMA_TO_DEVICE);
1181 }
1182 }
1183 if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
1184 if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
1185 list_for_each_entry(dsg, &txd->dsg_list, node)
1186 dma_unmap_single(dev, dsg->dst_addr, dsg->len,
1187 DMA_FROM_DEVICE);
1188 else
1189 list_for_each_entry(dsg, &txd->dsg_list, node)
1190 dma_unmap_page(dev, dsg->dst_addr, dsg->len,
1191 DMA_FROM_DEVICE);
1192 }
1193}
1194
1195static void pl08x_desc_free(struct virt_dma_desc *vd) 1167static void pl08x_desc_free(struct virt_dma_desc *vd)
1196{ 1168{
1197 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); 1169 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
1198 struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); 1170 struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
1199 1171
1200 if (!plchan->slave) 1172 dma_descriptor_unmap(&vd->tx);
1201 pl08x_unmap_buffers(txd);
1202
1203 if (!txd->done) 1173 if (!txd->done)
1204 pl08x_release_mux(plchan); 1174 pl08x_release_mux(plchan);
1205 1175
@@ -1252,7 +1222,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
1252 size_t bytes = 0; 1222 size_t bytes = 0;
1253 1223
1254 ret = dma_cookie_status(chan, cookie, txstate); 1224 ret = dma_cookie_status(chan, cookie, txstate);
1255 if (ret == DMA_SUCCESS) 1225 if (ret == DMA_COMPLETE)
1256 return ret; 1226 return ret;
1257 1227
1258 /* 1228 /*
@@ -1267,7 +1237,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
1267 1237
1268 spin_lock_irqsave(&plchan->vc.lock, flags); 1238 spin_lock_irqsave(&plchan->vc.lock, flags);
1269 ret = dma_cookie_status(chan, cookie, txstate); 1239 ret = dma_cookie_status(chan, cookie, txstate);
1270 if (ret != DMA_SUCCESS) { 1240 if (ret != DMA_COMPLETE) {
1271 vd = vchan_find_desc(&plchan->vc, cookie); 1241 vd = vchan_find_desc(&plchan->vc, cookie);
1272 if (vd) { 1242 if (vd) {
1273 /* On the issued list, so hasn't been processed yet */ 1243 /* On the issued list, so hasn't been processed yet */
@@ -2138,8 +2108,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2138 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); 2108 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
2139 writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); 2109 writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
2140 2110
2141 ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, 2111 ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
2142 DRIVER_NAME, pl08x);
2143 if (ret) { 2112 if (ret) {
2144 dev_err(&adev->dev, "%s failed to request interrupt %d\n", 2113 dev_err(&adev->dev, "%s failed to request interrupt %d\n",
2145 __func__, adev->irq[0]); 2114 __func__, adev->irq[0]);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index c787f38a186a..e2c04dc81e2a 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -344,31 +344,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
344 /* move myself to free_list */ 344 /* move myself to free_list */
345 list_move(&desc->desc_node, &atchan->free_list); 345 list_move(&desc->desc_node, &atchan->free_list);
346 346
347 /* unmap dma addresses (not on slave channels) */ 347 dma_descriptor_unmap(txd);
348 if (!atchan->chan_common.private) {
349 struct device *parent = chan2parent(&atchan->chan_common);
350 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
351 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
352 dma_unmap_single(parent,
353 desc->lli.daddr,
354 desc->len, DMA_FROM_DEVICE);
355 else
356 dma_unmap_page(parent,
357 desc->lli.daddr,
358 desc->len, DMA_FROM_DEVICE);
359 }
360 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
361 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
362 dma_unmap_single(parent,
363 desc->lli.saddr,
364 desc->len, DMA_TO_DEVICE);
365 else
366 dma_unmap_page(parent,
367 desc->lli.saddr,
368 desc->len, DMA_TO_DEVICE);
369 }
370 }
371
372 /* for cyclic transfers, 348 /* for cyclic transfers,
373 * no need to replay callback function while stopping */ 349 * no need to replay callback function while stopping */
374 if (!atc_chan_is_cyclic(atchan)) { 350 if (!atc_chan_is_cyclic(atchan)) {
@@ -1102,7 +1078,7 @@ atc_tx_status(struct dma_chan *chan,
1102 int bytes = 0; 1078 int bytes = 0;
1103 1079
1104 ret = dma_cookie_status(chan, cookie, txstate); 1080 ret = dma_cookie_status(chan, cookie, txstate);
1105 if (ret == DMA_SUCCESS) 1081 if (ret == DMA_COMPLETE)
1106 return ret; 1082 return ret;
1107 /* 1083 /*
1108 * There's no point calculating the residue if there's 1084 * There's no point calculating the residue if there's
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 31011d2a26fc..3c6716e0b78e 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -2369,7 +2369,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
2369 enum dma_status ret; 2369 enum dma_status ret;
2370 2370
2371 ret = dma_cookie_status(chan, cookie, txstate); 2371 ret = dma_cookie_status(chan, cookie, txstate);
2372 if (ret == DMA_SUCCESS) 2372 if (ret == DMA_COMPLETE)
2373 return ret; 2373 return ret;
2374 2374
2375 dma_set_residue(txstate, coh901318_get_bytes_left(chan)); 2375 dma_set_residue(txstate, coh901318_get_bytes_left(chan));
@@ -2694,7 +2694,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
2694 if (irq < 0) 2694 if (irq < 0)
2695 return irq; 2695 return irq;
2696 2696
2697 err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED, 2697 err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0,
2698 "coh901318", base); 2698 "coh901318", base);
2699 if (err) 2699 if (err)
2700 return err; 2700 return err;
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index 7c82b92f9b16..c29dacff66fa 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -141,6 +141,9 @@ struct cppi41_dd {
141 const struct chan_queues *queues_rx; 141 const struct chan_queues *queues_rx;
142 const struct chan_queues *queues_tx; 142 const struct chan_queues *queues_tx;
143 struct chan_queues td_queue; 143 struct chan_queues td_queue;
144
145 /* context for suspend/resume */
146 unsigned int dma_tdfdq;
144}; 147};
145 148
146#define FIST_COMPLETION_QUEUE 93 149#define FIST_COMPLETION_QUEUE 93
@@ -263,6 +266,15 @@ static u32 pd_trans_len(u32 val)
263 return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1); 266 return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1);
264} 267}
265 268
269static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
270{
271 u32 desc;
272
273 desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
274 desc &= ~0x1f;
275 return desc;
276}
277
266static irqreturn_t cppi41_irq(int irq, void *data) 278static irqreturn_t cppi41_irq(int irq, void *data)
267{ 279{
268 struct cppi41_dd *cdd = data; 280 struct cppi41_dd *cdd = data;
@@ -300,8 +312,7 @@ static irqreturn_t cppi41_irq(int irq, void *data)
300 q_num = __fls(val); 312 q_num = __fls(val);
301 val &= ~(1 << q_num); 313 val &= ~(1 << q_num);
302 q_num += 32 * i; 314 q_num += 32 * i;
303 desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(q_num)); 315 desc = cppi41_pop_desc(cdd, q_num);
304 desc &= ~0x1f;
305 c = desc_to_chan(cdd, desc); 316 c = desc_to_chan(cdd, desc);
306 if (WARN_ON(!c)) { 317 if (WARN_ON(!c)) {
307 pr_err("%s() q %d desc %08x\n", __func__, 318 pr_err("%s() q %d desc %08x\n", __func__,
@@ -353,7 +364,7 @@ static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
353 364
354 /* lock */ 365 /* lock */
355 ret = dma_cookie_status(chan, cookie, txstate); 366 ret = dma_cookie_status(chan, cookie, txstate);
356 if (txstate && ret == DMA_SUCCESS) 367 if (txstate && ret == DMA_COMPLETE)
357 txstate->residue = c->residue; 368 txstate->residue = c->residue;
358 /* unlock */ 369 /* unlock */
359 370
@@ -517,15 +528,6 @@ static void cppi41_compute_td_desc(struct cppi41_desc *d)
517 d->pd0 = DESC_TYPE_TEARD << DESC_TYPE; 528 d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
518} 529}
519 530
520static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
521{
522 u32 desc;
523
524 desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
525 desc &= ~0x1f;
526 return desc;
527}
528
529static int cppi41_tear_down_chan(struct cppi41_channel *c) 531static int cppi41_tear_down_chan(struct cppi41_channel *c)
530{ 532{
531 struct cppi41_dd *cdd = c->cdd; 533 struct cppi41_dd *cdd = c->cdd;
@@ -561,36 +563,26 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
561 c->td_retry = 100; 563 c->td_retry = 100;
562 } 564 }
563 565
564 if (!c->td_seen) { 566 if (!c->td_seen || !c->td_desc_seen) {
565 unsigned td_comp_queue;
566 567
567 if (c->is_tx) 568 desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete);
568 td_comp_queue = cdd->td_queue.complete; 569 if (!desc_phys)
569 else 570 desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
570 td_comp_queue = c->q_comp_num;
571 571
572 desc_phys = cppi41_pop_desc(cdd, td_comp_queue); 572 if (desc_phys == c->desc_phys) {
573 if (desc_phys) { 573 c->td_desc_seen = 1;
574 __iormb(); 574
575 } else if (desc_phys == td_desc_phys) {
576 u32 pd0;
575 577
576 if (desc_phys == td_desc_phys) {
577 u32 pd0;
578 pd0 = td->pd0;
579 WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
580 WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
581 WARN_ON((pd0 & 0x1f) != c->port_num);
582 } else {
583 WARN_ON_ONCE(1);
584 }
585 c->td_seen = 1;
586 }
587 }
588 if (!c->td_desc_seen) {
589 desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
590 if (desc_phys) {
591 __iormb(); 578 __iormb();
592 WARN_ON(c->desc_phys != desc_phys); 579 pd0 = td->pd0;
593 c->td_desc_seen = 1; 580 WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
581 WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
582 WARN_ON((pd0 & 0x1f) != c->port_num);
583 c->td_seen = 1;
584 } else if (desc_phys) {
585 WARN_ON_ONCE(1);
594 } 586 }
595 } 587 }
596 c->td_retry--; 588 c->td_retry--;
@@ -609,7 +601,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
609 601
610 WARN_ON(!c->td_retry); 602 WARN_ON(!c->td_retry);
611 if (!c->td_desc_seen) { 603 if (!c->td_desc_seen) {
612 desc_phys = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); 604 desc_phys = cppi41_pop_desc(cdd, c->q_num);
613 WARN_ON(!desc_phys); 605 WARN_ON(!desc_phys);
614 } 606 }
615 607
@@ -674,14 +666,14 @@ static void cleanup_chans(struct cppi41_dd *cdd)
674 } 666 }
675} 667}
676 668
677static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd) 669static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
678{ 670{
679 struct cppi41_channel *cchan; 671 struct cppi41_channel *cchan;
680 int i; 672 int i;
681 int ret; 673 int ret;
682 u32 n_chans; 674 u32 n_chans;
683 675
684 ret = of_property_read_u32(pdev->dev.of_node, "#dma-channels", 676 ret = of_property_read_u32(dev->of_node, "#dma-channels",
685 &n_chans); 677 &n_chans);
686 if (ret) 678 if (ret)
687 return ret; 679 return ret;
@@ -719,7 +711,7 @@ err:
719 return -ENOMEM; 711 return -ENOMEM;
720} 712}
721 713
722static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd) 714static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
723{ 715{
724 unsigned int mem_decs; 716 unsigned int mem_decs;
725 int i; 717 int i;
@@ -731,7 +723,7 @@ static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
731 cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i)); 723 cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
732 cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i)); 724 cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));
733 725
734 dma_free_coherent(&pdev->dev, mem_decs, cdd->cd, 726 dma_free_coherent(dev, mem_decs, cdd->cd,
735 cdd->descs_phys); 727 cdd->descs_phys);
736 } 728 }
737} 729}
@@ -741,19 +733,19 @@ static void disable_sched(struct cppi41_dd *cdd)
741 cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL); 733 cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
742} 734}
743 735
744static void deinit_cpii41(struct platform_device *pdev, struct cppi41_dd *cdd) 736static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
745{ 737{
746 disable_sched(cdd); 738 disable_sched(cdd);
747 739
748 purge_descs(pdev, cdd); 740 purge_descs(dev, cdd);
749 741
750 cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); 742 cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
751 cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); 743 cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
752 dma_free_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch, 744 dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
753 cdd->scratch_phys); 745 cdd->scratch_phys);
754} 746}
755 747
756static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd) 748static int init_descs(struct device *dev, struct cppi41_dd *cdd)
757{ 749{
758 unsigned int desc_size; 750 unsigned int desc_size;
759 unsigned int mem_decs; 751 unsigned int mem_decs;
@@ -777,7 +769,7 @@ static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
777 reg |= ilog2(ALLOC_DECS_NUM) - 5; 769 reg |= ilog2(ALLOC_DECS_NUM) - 5;
778 770
779 BUILD_BUG_ON(DESCS_AREAS != 1); 771 BUILD_BUG_ON(DESCS_AREAS != 1);
780 cdd->cd = dma_alloc_coherent(&pdev->dev, mem_decs, 772 cdd->cd = dma_alloc_coherent(dev, mem_decs,
781 &cdd->descs_phys, GFP_KERNEL); 773 &cdd->descs_phys, GFP_KERNEL);
782 if (!cdd->cd) 774 if (!cdd->cd)
783 return -ENOMEM; 775 return -ENOMEM;
@@ -813,12 +805,12 @@ static void init_sched(struct cppi41_dd *cdd)
813 cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL); 805 cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
814} 806}
815 807
816static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) 808static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
817{ 809{
818 int ret; 810 int ret;
819 811
820 BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1)); 812 BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
821 cdd->qmgr_scratch = dma_alloc_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, 813 cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
822 &cdd->scratch_phys, GFP_KERNEL); 814 &cdd->scratch_phys, GFP_KERNEL);
823 if (!cdd->qmgr_scratch) 815 if (!cdd->qmgr_scratch)
824 return -ENOMEM; 816 return -ENOMEM;
@@ -827,7 +819,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
827 cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); 819 cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
828 cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); 820 cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
829 821
830 ret = init_descs(pdev, cdd); 822 ret = init_descs(dev, cdd);
831 if (ret) 823 if (ret)
832 goto err_td; 824 goto err_td;
833 825
@@ -835,7 +827,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
835 init_sched(cdd); 827 init_sched(cdd);
836 return 0; 828 return 0;
837err_td: 829err_td:
838 deinit_cpii41(pdev, cdd); 830 deinit_cppi41(dev, cdd);
839 return ret; 831 return ret;
840} 832}
841 833
@@ -914,11 +906,11 @@ static const struct of_device_id cppi41_dma_ids[] = {
914}; 906};
915MODULE_DEVICE_TABLE(of, cppi41_dma_ids); 907MODULE_DEVICE_TABLE(of, cppi41_dma_ids);
916 908
917static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev) 909static const struct cppi_glue_infos *get_glue_info(struct device *dev)
918{ 910{
919 const struct of_device_id *of_id; 911 const struct of_device_id *of_id;
920 912
921 of_id = of_match_node(cppi41_dma_ids, pdev->dev.of_node); 913 of_id = of_match_node(cppi41_dma_ids, dev->of_node);
922 if (!of_id) 914 if (!of_id)
923 return NULL; 915 return NULL;
924 return of_id->data; 916 return of_id->data;
@@ -927,11 +919,12 @@ static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
927static int cppi41_dma_probe(struct platform_device *pdev) 919static int cppi41_dma_probe(struct platform_device *pdev)
928{ 920{
929 struct cppi41_dd *cdd; 921 struct cppi41_dd *cdd;
922 struct device *dev = &pdev->dev;
930 const struct cppi_glue_infos *glue_info; 923 const struct cppi_glue_infos *glue_info;
931 int irq; 924 int irq;
932 int ret; 925 int ret;
933 926
934 glue_info = get_glue_info(pdev); 927 glue_info = get_glue_info(dev);
935 if (!glue_info) 928 if (!glue_info)
936 return -EINVAL; 929 return -EINVAL;
937 930
@@ -946,14 +939,14 @@ static int cppi41_dma_probe(struct platform_device *pdev)
946 cdd->ddev.device_issue_pending = cppi41_dma_issue_pending; 939 cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
947 cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg; 940 cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
948 cdd->ddev.device_control = cppi41_dma_control; 941 cdd->ddev.device_control = cppi41_dma_control;
949 cdd->ddev.dev = &pdev->dev; 942 cdd->ddev.dev = dev;
950 INIT_LIST_HEAD(&cdd->ddev.channels); 943 INIT_LIST_HEAD(&cdd->ddev.channels);
951 cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; 944 cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
952 945
953 cdd->usbss_mem = of_iomap(pdev->dev.of_node, 0); 946 cdd->usbss_mem = of_iomap(dev->of_node, 0);
954 cdd->ctrl_mem = of_iomap(pdev->dev.of_node, 1); 947 cdd->ctrl_mem = of_iomap(dev->of_node, 1);
955 cdd->sched_mem = of_iomap(pdev->dev.of_node, 2); 948 cdd->sched_mem = of_iomap(dev->of_node, 2);
956 cdd->qmgr_mem = of_iomap(pdev->dev.of_node, 3); 949 cdd->qmgr_mem = of_iomap(dev->of_node, 3);
957 950
958 if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem || 951 if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
959 !cdd->qmgr_mem) { 952 !cdd->qmgr_mem) {
@@ -961,31 +954,31 @@ static int cppi41_dma_probe(struct platform_device *pdev)
961 goto err_remap; 954 goto err_remap;
962 } 955 }
963 956
964 pm_runtime_enable(&pdev->dev); 957 pm_runtime_enable(dev);
965 ret = pm_runtime_get_sync(&pdev->dev); 958 ret = pm_runtime_get_sync(dev);
966 if (ret) 959 if (ret < 0)
967 goto err_get_sync; 960 goto err_get_sync;
968 961
969 cdd->queues_rx = glue_info->queues_rx; 962 cdd->queues_rx = glue_info->queues_rx;
970 cdd->queues_tx = glue_info->queues_tx; 963 cdd->queues_tx = glue_info->queues_tx;
971 cdd->td_queue = glue_info->td_queue; 964 cdd->td_queue = glue_info->td_queue;
972 965
973 ret = init_cppi41(pdev, cdd); 966 ret = init_cppi41(dev, cdd);
974 if (ret) 967 if (ret)
975 goto err_init_cppi; 968 goto err_init_cppi;
976 969
977 ret = cppi41_add_chans(pdev, cdd); 970 ret = cppi41_add_chans(dev, cdd);
978 if (ret) 971 if (ret)
979 goto err_chans; 972 goto err_chans;
980 973
981 irq = irq_of_parse_and_map(pdev->dev.of_node, 0); 974 irq = irq_of_parse_and_map(dev->of_node, 0);
982 if (!irq) 975 if (!irq)
983 goto err_irq; 976 goto err_irq;
984 977
985 cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); 978 cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
986 979
987 ret = request_irq(irq, glue_info->isr, IRQF_SHARED, 980 ret = request_irq(irq, glue_info->isr, IRQF_SHARED,
988 dev_name(&pdev->dev), cdd); 981 dev_name(dev), cdd);
989 if (ret) 982 if (ret)
990 goto err_irq; 983 goto err_irq;
991 cdd->irq = irq; 984 cdd->irq = irq;
@@ -994,7 +987,7 @@ static int cppi41_dma_probe(struct platform_device *pdev)
994 if (ret) 987 if (ret)
995 goto err_dma_reg; 988 goto err_dma_reg;
996 989
997 ret = of_dma_controller_register(pdev->dev.of_node, 990 ret = of_dma_controller_register(dev->of_node,
998 cppi41_dma_xlate, &cpp41_dma_info); 991 cppi41_dma_xlate, &cpp41_dma_info);
999 if (ret) 992 if (ret)
1000 goto err_of; 993 goto err_of;
@@ -1009,11 +1002,11 @@ err_irq:
1009 cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); 1002 cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
1010 cleanup_chans(cdd); 1003 cleanup_chans(cdd);
1011err_chans: 1004err_chans:
1012 deinit_cpii41(pdev, cdd); 1005 deinit_cppi41(dev, cdd);
1013err_init_cppi: 1006err_init_cppi:
1014 pm_runtime_put(&pdev->dev); 1007 pm_runtime_put(dev);
1015err_get_sync: 1008err_get_sync:
1016 pm_runtime_disable(&pdev->dev); 1009 pm_runtime_disable(dev);
1017 iounmap(cdd->usbss_mem); 1010 iounmap(cdd->usbss_mem);
1018 iounmap(cdd->ctrl_mem); 1011 iounmap(cdd->ctrl_mem);
1019 iounmap(cdd->sched_mem); 1012 iounmap(cdd->sched_mem);
@@ -1033,7 +1026,7 @@ static int cppi41_dma_remove(struct platform_device *pdev)
1033 cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); 1026 cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
1034 free_irq(cdd->irq, cdd); 1027 free_irq(cdd->irq, cdd);
1035 cleanup_chans(cdd); 1028 cleanup_chans(cdd);
1036 deinit_cpii41(pdev, cdd); 1029 deinit_cppi41(&pdev->dev, cdd);
1037 iounmap(cdd->usbss_mem); 1030 iounmap(cdd->usbss_mem);
1038 iounmap(cdd->ctrl_mem); 1031 iounmap(cdd->ctrl_mem);
1039 iounmap(cdd->sched_mem); 1032 iounmap(cdd->sched_mem);
@@ -1044,12 +1037,53 @@ static int cppi41_dma_remove(struct platform_device *pdev)
1044 return 0; 1037 return 0;
1045} 1038}
1046 1039
1040#ifdef CONFIG_PM_SLEEP
1041static int cppi41_suspend(struct device *dev)
1042{
1043 struct cppi41_dd *cdd = dev_get_drvdata(dev);
1044
1045 cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ);
1046 cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
1047 disable_sched(cdd);
1048
1049 return 0;
1050}
1051
1052static int cppi41_resume(struct device *dev)
1053{
1054 struct cppi41_dd *cdd = dev_get_drvdata(dev);
1055 struct cppi41_channel *c;
1056 int i;
1057
1058 for (i = 0; i < DESCS_AREAS; i++)
1059 cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
1060
1061 list_for_each_entry(c, &cdd->ddev.channels, chan.device_node)
1062 if (!c->is_tx)
1063 cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);
1064
1065 init_sched(cdd);
1066
1067 cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ);
1068 cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
1069 cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
1070 cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
1071
1072 cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
1073
1074 return 0;
1075}
1076#endif
1077
1078static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume);
1079
1047static struct platform_driver cpp41_dma_driver = { 1080static struct platform_driver cpp41_dma_driver = {
1048 .probe = cppi41_dma_probe, 1081 .probe = cppi41_dma_probe,
1049 .remove = cppi41_dma_remove, 1082 .remove = cppi41_dma_remove,
1050 .driver = { 1083 .driver = {
1051 .name = "cppi41-dma-engine", 1084 .name = "cppi41-dma-engine",
1052 .owner = THIS_MODULE, 1085 .owner = THIS_MODULE,
1086 .pm = &cppi41_pm_ops,
1053 .of_match_table = of_match_ptr(cppi41_dma_ids), 1087 .of_match_table = of_match_ptr(cppi41_dma_ids),
1054 }, 1088 },
1055}; 1089};
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index b0c0c8268d42..94c380f07538 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -491,7 +491,7 @@ static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
491 unsigned long flags; 491 unsigned long flags;
492 492
493 status = dma_cookie_status(c, cookie, state); 493 status = dma_cookie_status(c, cookie, state);
494 if (status == DMA_SUCCESS || !state) 494 if (status == DMA_COMPLETE || !state)
495 return status; 495 return status;
496 496
497 spin_lock_irqsave(&chan->vchan.lock, flags); 497 spin_lock_irqsave(&chan->vchan.lock, flags);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 9162ac80c18f..ea806bdc12ef 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -65,6 +65,7 @@
65#include <linux/acpi.h> 65#include <linux/acpi.h>
66#include <linux/acpi_dma.h> 66#include <linux/acpi_dma.h>
67#include <linux/of_dma.h> 67#include <linux/of_dma.h>
68#include <linux/mempool.h>
68 69
69static DEFINE_MUTEX(dma_list_mutex); 70static DEFINE_MUTEX(dma_list_mutex);
70static DEFINE_IDR(dma_idr); 71static DEFINE_IDR(dma_idr);
@@ -901,98 +902,132 @@ void dma_async_device_unregister(struct dma_device *device)
901} 902}
902EXPORT_SYMBOL(dma_async_device_unregister); 903EXPORT_SYMBOL(dma_async_device_unregister);
903 904
904/** 905struct dmaengine_unmap_pool {
905 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses 906 struct kmem_cache *cache;
906 * @chan: DMA channel to offload copy to 907 const char *name;
907 * @dest: destination address (virtual) 908 mempool_t *pool;
908 * @src: source address (virtual) 909 size_t size;
909 * @len: length 910};
910 *
911 * Both @dest and @src must be mappable to a bus address according to the
912 * DMA mapping API rules for streaming mappings.
913 * Both @dest and @src must stay memory resident (kernel memory or locked
914 * user space pages).
915 */
916dma_cookie_t
917dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
918 void *src, size_t len)
919{
920 struct dma_device *dev = chan->device;
921 struct dma_async_tx_descriptor *tx;
922 dma_addr_t dma_dest, dma_src;
923 dma_cookie_t cookie;
924 unsigned long flags;
925 911
926 dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE); 912#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
927 dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE); 913static struct dmaengine_unmap_pool unmap_pool[] = {
928 flags = DMA_CTRL_ACK | 914 __UNMAP_POOL(2),
929 DMA_COMPL_SRC_UNMAP_SINGLE | 915 #if IS_ENABLED(CONFIG_ASYNC_TX_DMA)
930 DMA_COMPL_DEST_UNMAP_SINGLE; 916 __UNMAP_POOL(16),
931 tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); 917 __UNMAP_POOL(128),
918 __UNMAP_POOL(256),
919 #endif
920};
932 921
933 if (!tx) { 922static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
934 dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); 923{
935 dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE); 924 int order = get_count_order(nr);
936 return -ENOMEM; 925
926 switch (order) {
927 case 0 ... 1:
928 return &unmap_pool[0];
929 case 2 ... 4:
930 return &unmap_pool[1];
931 case 5 ... 7:
932 return &unmap_pool[2];
933 case 8:
934 return &unmap_pool[3];
935 default:
936 BUG();
937 return NULL;
937 } 938 }
939}
938 940
939 tx->callback = NULL; 941static void dmaengine_unmap(struct kref *kref)
940 cookie = tx->tx_submit(tx); 942{
943 struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
944 struct device *dev = unmap->dev;
945 int cnt, i;
946
947 cnt = unmap->to_cnt;
948 for (i = 0; i < cnt; i++)
949 dma_unmap_page(dev, unmap->addr[i], unmap->len,
950 DMA_TO_DEVICE);
951 cnt += unmap->from_cnt;
952 for (; i < cnt; i++)
953 dma_unmap_page(dev, unmap->addr[i], unmap->len,
954 DMA_FROM_DEVICE);
955 cnt += unmap->bidi_cnt;
956 for (; i < cnt; i++) {
957 if (unmap->addr[i] == 0)
958 continue;
959 dma_unmap_page(dev, unmap->addr[i], unmap->len,
960 DMA_BIDIRECTIONAL);
961 }
962 mempool_free(unmap, __get_unmap_pool(cnt)->pool);
963}
941 964
942 preempt_disable(); 965void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
943 __this_cpu_add(chan->local->bytes_transferred, len); 966{
944 __this_cpu_inc(chan->local->memcpy_count); 967 if (unmap)
945 preempt_enable(); 968 kref_put(&unmap->kref, dmaengine_unmap);
969}
970EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
946 971
947 return cookie; 972static void dmaengine_destroy_unmap_pool(void)
973{
974 int i;
975
976 for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
977 struct dmaengine_unmap_pool *p = &unmap_pool[i];
978
979 if (p->pool)
980 mempool_destroy(p->pool);
981 p->pool = NULL;
982 if (p->cache)
983 kmem_cache_destroy(p->cache);
984 p->cache = NULL;
985 }
948} 986}
949EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
950 987
951/** 988static int __init dmaengine_init_unmap_pool(void)
952 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
953 * @chan: DMA channel to offload copy to
954 * @page: destination page
955 * @offset: offset in page to copy to
956 * @kdata: source address (virtual)
957 * @len: length
958 *
959 * Both @page/@offset and @kdata must be mappable to a bus address according
960 * to the DMA mapping API rules for streaming mappings.
961 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
962 * locked user space pages)
963 */
964dma_cookie_t
965dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
966 unsigned int offset, void *kdata, size_t len)
967{ 989{
968 struct dma_device *dev = chan->device; 990 int i;
969 struct dma_async_tx_descriptor *tx;
970 dma_addr_t dma_dest, dma_src;
971 dma_cookie_t cookie;
972 unsigned long flags;
973 991
974 dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE); 992 for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
975 dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE); 993 struct dmaengine_unmap_pool *p = &unmap_pool[i];
976 flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE; 994 size_t size;
977 tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
978 995
979 if (!tx) { 996 size = sizeof(struct dmaengine_unmap_data) +
980 dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); 997 sizeof(dma_addr_t) * p->size;
981 dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE); 998
982 return -ENOMEM; 999 p->cache = kmem_cache_create(p->name, size, 0,
1000 SLAB_HWCACHE_ALIGN, NULL);
1001 if (!p->cache)
1002 break;
1003 p->pool = mempool_create_slab_pool(1, p->cache);
1004 if (!p->pool)
1005 break;
983 } 1006 }
984 1007
985 tx->callback = NULL; 1008 if (i == ARRAY_SIZE(unmap_pool))
986 cookie = tx->tx_submit(tx); 1009 return 0;
987 1010
988 preempt_disable(); 1011 dmaengine_destroy_unmap_pool();
989 __this_cpu_add(chan->local->bytes_transferred, len); 1012 return -ENOMEM;
990 __this_cpu_inc(chan->local->memcpy_count); 1013}
991 preempt_enable();
992 1014
993 return cookie; 1015struct dmaengine_unmap_data *
1016dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1017{
1018 struct dmaengine_unmap_data *unmap;
1019
1020 unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
1021 if (!unmap)
1022 return NULL;
1023
1024 memset(unmap, 0, sizeof(*unmap));
1025 kref_init(&unmap->kref);
1026 unmap->dev = dev;
1027
1028 return unmap;
994} 1029}
995EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg); 1030EXPORT_SYMBOL(dmaengine_get_unmap_data);
996 1031
997/** 1032/**
998 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page 1033 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
@@ -1015,24 +1050,33 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
1015{ 1050{
1016 struct dma_device *dev = chan->device; 1051 struct dma_device *dev = chan->device;
1017 struct dma_async_tx_descriptor *tx; 1052 struct dma_async_tx_descriptor *tx;
1018 dma_addr_t dma_dest, dma_src; 1053 struct dmaengine_unmap_data *unmap;
1019 dma_cookie_t cookie; 1054 dma_cookie_t cookie;
1020 unsigned long flags; 1055 unsigned long flags;
1021 1056
1022 dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE); 1057 unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
1023 dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len, 1058 if (!unmap)
1024 DMA_FROM_DEVICE); 1059 return -ENOMEM;
1060
1061 unmap->to_cnt = 1;
1062 unmap->from_cnt = 1;
1063 unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
1064 DMA_TO_DEVICE);
1065 unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
1066 DMA_FROM_DEVICE);
1067 unmap->len = len;
1025 flags = DMA_CTRL_ACK; 1068 flags = DMA_CTRL_ACK;
1026 tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); 1069 tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
1070 len, flags);
1027 1071
1028 if (!tx) { 1072 if (!tx) {
1029 dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE); 1073 dmaengine_unmap_put(unmap);
1030 dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
1031 return -ENOMEM; 1074 return -ENOMEM;
1032 } 1075 }
1033 1076
1034 tx->callback = NULL; 1077 dma_set_unmap(tx, unmap);
1035 cookie = tx->tx_submit(tx); 1078 cookie = tx->tx_submit(tx);
1079 dmaengine_unmap_put(unmap);
1036 1080
1037 preempt_disable(); 1081 preempt_disable();
1038 __this_cpu_add(chan->local->bytes_transferred, len); 1082 __this_cpu_add(chan->local->bytes_transferred, len);
@@ -1043,6 +1087,52 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
1043} 1087}
1044EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg); 1088EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
1045 1089
1090/**
1091 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
1092 * @chan: DMA channel to offload copy to
1093 * @dest: destination address (virtual)
1094 * @src: source address (virtual)
1095 * @len: length
1096 *
1097 * Both @dest and @src must be mappable to a bus address according to the
1098 * DMA mapping API rules for streaming mappings.
1099 * Both @dest and @src must stay memory resident (kernel memory or locked
1100 * user space pages).
1101 */
1102dma_cookie_t
1103dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
1104 void *src, size_t len)
1105{
1106 return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
1107 (unsigned long) dest & ~PAGE_MASK,
1108 virt_to_page(src),
1109 (unsigned long) src & ~PAGE_MASK, len);
1110}
1111EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
1112
1113/**
1114 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
1115 * @chan: DMA channel to offload copy to
1116 * @page: destination page
1117 * @offset: offset in page to copy to
1118 * @kdata: source address (virtual)
1119 * @len: length
1120 *
1121 * Both @page/@offset and @kdata must be mappable to a bus address according
1122 * to the DMA mapping API rules for streaming mappings.
1123 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
1124 * locked user space pages)
1125 */
1126dma_cookie_t
1127dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
1128 unsigned int offset, void *kdata, size_t len)
1129{
1130 return dma_async_memcpy_pg_to_pg(chan, page, offset,
1131 virt_to_page(kdata),
1132 (unsigned long) kdata & ~PAGE_MASK, len);
1133}
1134EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
1135
1046void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, 1136void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1047 struct dma_chan *chan) 1137 struct dma_chan *chan)
1048{ 1138{
@@ -1062,7 +1152,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1062 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); 1152 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
1063 1153
1064 if (!tx) 1154 if (!tx)
1065 return DMA_SUCCESS; 1155 return DMA_COMPLETE;
1066 1156
1067 while (tx->cookie == -EBUSY) { 1157 while (tx->cookie == -EBUSY) {
1068 if (time_after_eq(jiffies, dma_sync_wait_timeout)) { 1158 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
@@ -1116,6 +1206,10 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies);
1116 1206
1117static int __init dma_bus_init(void) 1207static int __init dma_bus_init(void)
1118{ 1208{
1209 int err = dmaengine_init_unmap_pool();
1210
1211 if (err)
1212 return err;
1119 return class_register(&dma_devclass); 1213 return class_register(&dma_devclass);
1120} 1214}
1121arch_initcall(dma_bus_init); 1215arch_initcall(dma_bus_init);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 92f796cdc6ab..20f9a3aaf926 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -8,6 +8,8 @@
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
11#include <linux/delay.h> 13#include <linux/delay.h>
12#include <linux/dma-mapping.h> 14#include <linux/dma-mapping.h>
13#include <linux/dmaengine.h> 15#include <linux/dmaengine.h>
@@ -19,10 +21,6 @@
19#include <linux/random.h> 21#include <linux/random.h>
20#include <linux/slab.h> 22#include <linux/slab.h>
21#include <linux/wait.h> 23#include <linux/wait.h>
22#include <linux/ctype.h>
23#include <linux/debugfs.h>
24#include <linux/uaccess.h>
25#include <linux/seq_file.h>
26 24
27static unsigned int test_buf_size = 16384; 25static unsigned int test_buf_size = 16384;
28module_param(test_buf_size, uint, S_IRUGO | S_IWUSR); 26module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
@@ -68,92 +66,13 @@ module_param(timeout, uint, S_IRUGO | S_IWUSR);
68MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " 66MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
69 "Pass -1 for infinite timeout"); 67 "Pass -1 for infinite timeout");
70 68
71/* Maximum amount of mismatched bytes in buffer to print */ 69static bool noverify;
72#define MAX_ERROR_COUNT 32 70module_param(noverify, bool, S_IRUGO | S_IWUSR);
73 71MODULE_PARM_DESC(noverify, "Disable random data setup and verification");
74/*
75 * Initialization patterns. All bytes in the source buffer has bit 7
76 * set, all bytes in the destination buffer has bit 7 cleared.
77 *
78 * Bit 6 is set for all bytes which are to be copied by the DMA
79 * engine. Bit 5 is set for all bytes which are to be overwritten by
80 * the DMA engine.
81 *
82 * The remaining bits are the inverse of a counter which increments by
83 * one for each byte address.
84 */
85#define PATTERN_SRC 0x80
86#define PATTERN_DST 0x00
87#define PATTERN_COPY 0x40
88#define PATTERN_OVERWRITE 0x20
89#define PATTERN_COUNT_MASK 0x1f
90
91enum dmatest_error_type {
92 DMATEST_ET_OK,
93 DMATEST_ET_MAP_SRC,
94 DMATEST_ET_MAP_DST,
95 DMATEST_ET_PREP,
96 DMATEST_ET_SUBMIT,
97 DMATEST_ET_TIMEOUT,
98 DMATEST_ET_DMA_ERROR,
99 DMATEST_ET_DMA_IN_PROGRESS,
100 DMATEST_ET_VERIFY,
101 DMATEST_ET_VERIFY_BUF,
102};
103
104struct dmatest_verify_buffer {
105 unsigned int index;
106 u8 expected;
107 u8 actual;
108};
109
110struct dmatest_verify_result {
111 unsigned int error_count;
112 struct dmatest_verify_buffer data[MAX_ERROR_COUNT];
113 u8 pattern;
114 bool is_srcbuf;
115};
116
117struct dmatest_thread_result {
118 struct list_head node;
119 unsigned int n;
120 unsigned int src_off;
121 unsigned int dst_off;
122 unsigned int len;
123 enum dmatest_error_type type;
124 union {
125 unsigned long data;
126 dma_cookie_t cookie;
127 enum dma_status status;
128 int error;
129 struct dmatest_verify_result *vr;
130 };
131};
132
133struct dmatest_result {
134 struct list_head node;
135 char *name;
136 struct list_head results;
137};
138
139struct dmatest_info;
140
141struct dmatest_thread {
142 struct list_head node;
143 struct dmatest_info *info;
144 struct task_struct *task;
145 struct dma_chan *chan;
146 u8 **srcs;
147 u8 **dsts;
148 enum dma_transaction_type type;
149 bool done;
150};
151 72
152struct dmatest_chan { 73static bool verbose;
153 struct list_head node; 74module_param(verbose, bool, S_IRUGO | S_IWUSR);
154 struct dma_chan *chan; 75MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
155 struct list_head threads;
156};
157 76
158/** 77/**
159 * struct dmatest_params - test parameters. 78 * struct dmatest_params - test parameters.
@@ -177,6 +96,7 @@ struct dmatest_params {
177 unsigned int xor_sources; 96 unsigned int xor_sources;
178 unsigned int pq_sources; 97 unsigned int pq_sources;
179 int timeout; 98 int timeout;
99 bool noverify;
180}; 100};
181 101
182/** 102/**
@@ -184,7 +104,7 @@ struct dmatest_params {
184 * @params: test parameters 104 * @params: test parameters
185 * @lock: access protection to the fields of this structure 105 * @lock: access protection to the fields of this structure
186 */ 106 */
187struct dmatest_info { 107static struct dmatest_info {
188 /* Test parameters */ 108 /* Test parameters */
189 struct dmatest_params params; 109 struct dmatest_params params;
190 110
@@ -192,16 +112,95 @@ struct dmatest_info {
192 struct list_head channels; 112 struct list_head channels;
193 unsigned int nr_channels; 113 unsigned int nr_channels;
194 struct mutex lock; 114 struct mutex lock;
115 bool did_init;
116} test_info = {
117 .channels = LIST_HEAD_INIT(test_info.channels),
118 .lock = __MUTEX_INITIALIZER(test_info.lock),
119};
120
121static int dmatest_run_set(const char *val, const struct kernel_param *kp);
122static int dmatest_run_get(char *val, const struct kernel_param *kp);
123static struct kernel_param_ops run_ops = {
124 .set = dmatest_run_set,
125 .get = dmatest_run_get,
126};
127static bool dmatest_run;
128module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
129MODULE_PARM_DESC(run, "Run the test (default: false)");
130
131/* Maximum amount of mismatched bytes in buffer to print */
132#define MAX_ERROR_COUNT 32
133
134/*
135 * Initialization patterns. All bytes in the source buffer has bit 7
136 * set, all bytes in the destination buffer has bit 7 cleared.
137 *
138 * Bit 6 is set for all bytes which are to be copied by the DMA
139 * engine. Bit 5 is set for all bytes which are to be overwritten by
140 * the DMA engine.
141 *
142 * The remaining bits are the inverse of a counter which increments by
143 * one for each byte address.
144 */
145#define PATTERN_SRC 0x80
146#define PATTERN_DST 0x00
147#define PATTERN_COPY 0x40
148#define PATTERN_OVERWRITE 0x20
149#define PATTERN_COUNT_MASK 0x1f
195 150
196 /* debugfs related stuff */ 151struct dmatest_thread {
197 struct dentry *root; 152 struct list_head node;
153 struct dmatest_info *info;
154 struct task_struct *task;
155 struct dma_chan *chan;
156 u8 **srcs;
157 u8 **dsts;
158 enum dma_transaction_type type;
159 bool done;
160};
198 161
199 /* Test results */ 162struct dmatest_chan {
200 struct list_head results; 163 struct list_head node;
201 struct mutex results_lock; 164 struct dma_chan *chan;
165 struct list_head threads;
202}; 166};
203 167
204static struct dmatest_info test_info; 168static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
169static bool wait;
170
171static bool is_threaded_test_run(struct dmatest_info *info)
172{
173 struct dmatest_chan *dtc;
174
175 list_for_each_entry(dtc, &info->channels, node) {
176 struct dmatest_thread *thread;
177
178 list_for_each_entry(thread, &dtc->threads, node) {
179 if (!thread->done)
180 return true;
181 }
182 }
183
184 return false;
185}
186
187static int dmatest_wait_get(char *val, const struct kernel_param *kp)
188{
189 struct dmatest_info *info = &test_info;
190 struct dmatest_params *params = &info->params;
191
192 if (params->iterations)
193 wait_event(thread_wait, !is_threaded_test_run(info));
194 wait = true;
195 return param_get_bool(val, kp);
196}
197
198static struct kernel_param_ops wait_ops = {
199 .get = dmatest_wait_get,
200 .set = param_set_bool,
201};
202module_param_cb(wait, &wait_ops, &wait, S_IRUGO);
203MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)");
205 204
206static bool dmatest_match_channel(struct dmatest_params *params, 205static bool dmatest_match_channel(struct dmatest_params *params,
207 struct dma_chan *chan) 206 struct dma_chan *chan)
@@ -223,7 +222,7 @@ static unsigned long dmatest_random(void)
223{ 222{
224 unsigned long buf; 223 unsigned long buf;
225 224
226 get_random_bytes(&buf, sizeof(buf)); 225 prandom_bytes(&buf, sizeof(buf));
227 return buf; 226 return buf;
228} 227}
229 228
@@ -262,9 +261,31 @@ static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
262 } 261 }
263} 262}
264 263
265static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, 264static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
266 unsigned int start, unsigned int end, unsigned int counter, 265 unsigned int counter, bool is_srcbuf)
267 u8 pattern, bool is_srcbuf) 266{
267 u8 diff = actual ^ pattern;
268 u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
269 const char *thread_name = current->comm;
270
271 if (is_srcbuf)
272 pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
273 thread_name, index, expected, actual);
274 else if ((pattern & PATTERN_COPY)
275 && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
276 pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
277 thread_name, index, expected, actual);
278 else if (diff & PATTERN_SRC)
279 pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
280 thread_name, index, expected, actual);
281 else
282 pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
283 thread_name, index, expected, actual);
284}
285
286static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
287 unsigned int end, unsigned int counter, u8 pattern,
288 bool is_srcbuf)
268{ 289{
269 unsigned int i; 290 unsigned int i;
270 unsigned int error_count = 0; 291 unsigned int error_count = 0;
@@ -272,7 +293,6 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
272 u8 expected; 293 u8 expected;
273 u8 *buf; 294 u8 *buf;
274 unsigned int counter_orig = counter; 295 unsigned int counter_orig = counter;
275 struct dmatest_verify_buffer *vb;
276 296
277 for (; (buf = *bufs); bufs++) { 297 for (; (buf = *bufs); bufs++) {
278 counter = counter_orig; 298 counter = counter_orig;
@@ -280,12 +300,9 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
280 actual = buf[i]; 300 actual = buf[i];
281 expected = pattern | (~counter & PATTERN_COUNT_MASK); 301 expected = pattern | (~counter & PATTERN_COUNT_MASK);
282 if (actual != expected) { 302 if (actual != expected) {
283 if (error_count < MAX_ERROR_COUNT && vr) { 303 if (error_count < MAX_ERROR_COUNT)
284 vb = &vr->data[error_count]; 304 dmatest_mismatch(actual, pattern, i,
285 vb->index = i; 305 counter, is_srcbuf);
286 vb->expected = expected;
287 vb->actual = actual;
288 }
289 error_count++; 306 error_count++;
290 } 307 }
291 counter++; 308 counter++;
@@ -293,7 +310,7 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
293 } 310 }
294 311
295 if (error_count > MAX_ERROR_COUNT) 312 if (error_count > MAX_ERROR_COUNT)
296 pr_warning("%s: %u errors suppressed\n", 313 pr_warn("%s: %u errors suppressed\n",
297 current->comm, error_count - MAX_ERROR_COUNT); 314 current->comm, error_count - MAX_ERROR_COUNT);
298 315
299 return error_count; 316 return error_count;
@@ -313,20 +330,6 @@ static void dmatest_callback(void *arg)
313 wake_up_all(done->wait); 330 wake_up_all(done->wait);
314} 331}
315 332
316static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
317 unsigned int count)
318{
319 while (count--)
320 dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
321}
322
323static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
324 unsigned int count)
325{
326 while (count--)
327 dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
328}
329
330static unsigned int min_odd(unsigned int x, unsigned int y) 333static unsigned int min_odd(unsigned int x, unsigned int y)
331{ 334{
332 unsigned int val = min(x, y); 335 unsigned int val = min(x, y);
@@ -334,172 +337,49 @@ static unsigned int min_odd(unsigned int x, unsigned int y)
334 return val % 2 ? val : val - 1; 337 return val % 2 ? val : val - 1;
335} 338}
336 339
337static char *verify_result_get_one(struct dmatest_verify_result *vr, 340static void result(const char *err, unsigned int n, unsigned int src_off,
338 unsigned int i) 341 unsigned int dst_off, unsigned int len, unsigned long data)
339{ 342{
340 struct dmatest_verify_buffer *vb = &vr->data[i]; 343 pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
341 u8 diff = vb->actual ^ vr->pattern; 344 current->comm, n, err, src_off, dst_off, len, data);
342 static char buf[512];
343 char *msg;
344
345 if (vr->is_srcbuf)
346 msg = "srcbuf overwritten!";
347 else if ((vr->pattern & PATTERN_COPY)
348 && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
349 msg = "dstbuf not copied!";
350 else if (diff & PATTERN_SRC)
351 msg = "dstbuf was copied!";
352 else
353 msg = "dstbuf mismatch!";
354
355 snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg,
356 vb->index, vb->expected, vb->actual);
357
358 return buf;
359} 345}
360 346
361static char *thread_result_get(const char *name, 347static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
362 struct dmatest_thread_result *tr) 348 unsigned int dst_off, unsigned int len,
349 unsigned long data)
363{ 350{
364 static const char * const messages[] = { 351 pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
365 [DMATEST_ET_OK] = "No errors", 352 current->comm, n, err, src_off, dst_off, len, data);
366 [DMATEST_ET_MAP_SRC] = "src mapping error",
367 [DMATEST_ET_MAP_DST] = "dst mapping error",
368 [DMATEST_ET_PREP] = "prep error",
369 [DMATEST_ET_SUBMIT] = "submit error",
370 [DMATEST_ET_TIMEOUT] = "test timed out",
371 [DMATEST_ET_DMA_ERROR] =
372 "got completion callback (DMA_ERROR)",
373 [DMATEST_ET_DMA_IN_PROGRESS] =
374 "got completion callback (DMA_IN_PROGRESS)",
375 [DMATEST_ET_VERIFY] = "errors",
376 [DMATEST_ET_VERIFY_BUF] = "verify errors",
377 };
378 static char buf[512];
379
380 snprintf(buf, sizeof(buf) - 1,
381 "%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)",
382 name, tr->n, messages[tr->type], tr->src_off, tr->dst_off,
383 tr->len, tr->data);
384
385 return buf;
386} 353}
387 354
388static int thread_result_add(struct dmatest_info *info, 355#define verbose_result(err, n, src_off, dst_off, len, data) ({ \
389 struct dmatest_result *r, enum dmatest_error_type type, 356 if (verbose) \
390 unsigned int n, unsigned int src_off, unsigned int dst_off, 357 result(err, n, src_off, dst_off, len, data); \
391 unsigned int len, unsigned long data) 358 else \
392{ 359 dbg_result(err, n, src_off, dst_off, len, data); \
393 struct dmatest_thread_result *tr; 360})
394
395 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
396 if (!tr)
397 return -ENOMEM;
398
399 tr->type = type;
400 tr->n = n;
401 tr->src_off = src_off;
402 tr->dst_off = dst_off;
403 tr->len = len;
404 tr->data = data;
405 361
406 mutex_lock(&info->results_lock); 362static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
407 list_add_tail(&tr->node, &r->results);
408 mutex_unlock(&info->results_lock);
409
410 if (tr->type == DMATEST_ET_OK)
411 pr_debug("%s\n", thread_result_get(r->name, tr));
412 else
413 pr_warn("%s\n", thread_result_get(r->name, tr));
414
415 return 0;
416}
417
418static unsigned int verify_result_add(struct dmatest_info *info,
419 struct dmatest_result *r, unsigned int n,
420 unsigned int src_off, unsigned int dst_off, unsigned int len,
421 u8 **bufs, int whence, unsigned int counter, u8 pattern,
422 bool is_srcbuf)
423{ 363{
424 struct dmatest_verify_result *vr; 364 unsigned long long per_sec = 1000000;
425 unsigned int error_count;
426 unsigned int buf_off = is_srcbuf ? src_off : dst_off;
427 unsigned int start, end;
428
429 if (whence < 0) {
430 start = 0;
431 end = buf_off;
432 } else if (whence > 0) {
433 start = buf_off + len;
434 end = info->params.buf_size;
435 } else {
436 start = buf_off;
437 end = buf_off + len;
438 }
439 365
440 vr = kmalloc(sizeof(*vr), GFP_KERNEL); 366 if (runtime <= 0)
441 if (!vr) { 367 return 0;
442 pr_warn("dmatest: No memory to store verify result\n");
443 return dmatest_verify(NULL, bufs, start, end, counter, pattern,
444 is_srcbuf);
445 }
446
447 vr->pattern = pattern;
448 vr->is_srcbuf = is_srcbuf;
449
450 error_count = dmatest_verify(vr, bufs, start, end, counter, pattern,
451 is_srcbuf);
452 if (error_count) {
453 vr->error_count = error_count;
454 thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off,
455 dst_off, len, (unsigned long)vr);
456 return error_count;
457 }
458
459 kfree(vr);
460 return 0;
461}
462
463static void result_free(struct dmatest_info *info, const char *name)
464{
465 struct dmatest_result *r, *_r;
466
467 mutex_lock(&info->results_lock);
468 list_for_each_entry_safe(r, _r, &info->results, node) {
469 struct dmatest_thread_result *tr, *_tr;
470
471 if (name && strcmp(r->name, name))
472 continue;
473
474 list_for_each_entry_safe(tr, _tr, &r->results, node) {
475 if (tr->type == DMATEST_ET_VERIFY_BUF)
476 kfree(tr->vr);
477 list_del(&tr->node);
478 kfree(tr);
479 }
480 368
481 kfree(r->name); 369 /* drop precision until runtime is 32-bits */
482 list_del(&r->node); 370 while (runtime > UINT_MAX) {
483 kfree(r); 371 runtime >>= 1;
372 per_sec <<= 1;
484 } 373 }
485 374
486 mutex_unlock(&info->results_lock); 375 per_sec *= val;
376 do_div(per_sec, runtime);
377 return per_sec;
487} 378}
488 379
489static struct dmatest_result *result_init(struct dmatest_info *info, 380static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
490 const char *name)
491{ 381{
492 struct dmatest_result *r; 382 return dmatest_persec(runtime, len >> 10);
493
494 r = kzalloc(sizeof(*r), GFP_KERNEL);
495 if (r) {
496 r->name = kstrdup(name, GFP_KERNEL);
497 INIT_LIST_HEAD(&r->results);
498 mutex_lock(&info->results_lock);
499 list_add_tail(&r->node, &info->results);
500 mutex_unlock(&info->results_lock);
501 }
502 return r;
503} 383}
504 384
505/* 385/*
@@ -525,7 +405,6 @@ static int dmatest_func(void *data)
525 struct dmatest_params *params; 405 struct dmatest_params *params;
526 struct dma_chan *chan; 406 struct dma_chan *chan;
527 struct dma_device *dev; 407 struct dma_device *dev;
528 const char *thread_name;
529 unsigned int src_off, dst_off, len; 408 unsigned int src_off, dst_off, len;
530 unsigned int error_count; 409 unsigned int error_count;
531 unsigned int failed_tests = 0; 410 unsigned int failed_tests = 0;
@@ -538,9 +417,10 @@ static int dmatest_func(void *data)
538 int src_cnt; 417 int src_cnt;
539 int dst_cnt; 418 int dst_cnt;
540 int i; 419 int i;
541 struct dmatest_result *result; 420 ktime_t ktime;
421 s64 runtime = 0;
422 unsigned long long total_len = 0;
542 423
543 thread_name = current->comm;
544 set_freezable(); 424 set_freezable();
545 425
546 ret = -ENOMEM; 426 ret = -ENOMEM;
@@ -570,10 +450,6 @@ static int dmatest_func(void *data)
570 } else 450 } else
571 goto err_thread_type; 451 goto err_thread_type;
572 452
573 result = result_init(info, thread_name);
574 if (!result)
575 goto err_srcs;
576
577 thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); 453 thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
578 if (!thread->srcs) 454 if (!thread->srcs)
579 goto err_srcs; 455 goto err_srcs;
@@ -597,17 +473,17 @@ static int dmatest_func(void *data)
597 set_user_nice(current, 10); 473 set_user_nice(current, 10);
598 474
599 /* 475 /*
600 * src buffers are freed by the DMAEngine code with dma_unmap_single() 476 * src and dst buffers are freed by ourselves below
601 * dst buffers are freed by ourselves below
602 */ 477 */
603 flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT 478 flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
604 | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
605 479
480 ktime = ktime_get();
606 while (!kthread_should_stop() 481 while (!kthread_should_stop()
607 && !(params->iterations && total_tests >= params->iterations)) { 482 && !(params->iterations && total_tests >= params->iterations)) {
608 struct dma_async_tx_descriptor *tx = NULL; 483 struct dma_async_tx_descriptor *tx = NULL;
609 dma_addr_t dma_srcs[src_cnt]; 484 struct dmaengine_unmap_data *um;
610 dma_addr_t dma_dsts[dst_cnt]; 485 dma_addr_t srcs[src_cnt];
486 dma_addr_t *dsts;
611 u8 align = 0; 487 u8 align = 0;
612 488
613 total_tests++; 489 total_tests++;
@@ -626,81 +502,103 @@ static int dmatest_func(void *data)
626 break; 502 break;
627 } 503 }
628 504
629 len = dmatest_random() % params->buf_size + 1; 505 if (params->noverify) {
506 len = params->buf_size;
507 src_off = 0;
508 dst_off = 0;
509 } else {
510 len = dmatest_random() % params->buf_size + 1;
511 len = (len >> align) << align;
512 if (!len)
513 len = 1 << align;
514 src_off = dmatest_random() % (params->buf_size - len + 1);
515 dst_off = dmatest_random() % (params->buf_size - len + 1);
516
517 src_off = (src_off >> align) << align;
518 dst_off = (dst_off >> align) << align;
519
520 dmatest_init_srcs(thread->srcs, src_off, len,
521 params->buf_size);
522 dmatest_init_dsts(thread->dsts, dst_off, len,
523 params->buf_size);
524 }
525
630 len = (len >> align) << align; 526 len = (len >> align) << align;
631 if (!len) 527 if (!len)
632 len = 1 << align; 528 len = 1 << align;
633 src_off = dmatest_random() % (params->buf_size - len + 1); 529 total_len += len;
634 dst_off = dmatest_random() % (params->buf_size - len + 1);
635 530
636 src_off = (src_off >> align) << align; 531 um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt,
637 dst_off = (dst_off >> align) << align; 532 GFP_KERNEL);
638 533 if (!um) {
639 dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size); 534 failed_tests++;
640 dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size); 535 result("unmap data NULL", total_tests,
536 src_off, dst_off, len, ret);
537 continue;
538 }
641 539
540 um->len = params->buf_size;
642 for (i = 0; i < src_cnt; i++) { 541 for (i = 0; i < src_cnt; i++) {
643 u8 *buf = thread->srcs[i] + src_off; 542 unsigned long buf = (unsigned long) thread->srcs[i];
644 543 struct page *pg = virt_to_page(buf);
645 dma_srcs[i] = dma_map_single(dev->dev, buf, len, 544 unsigned pg_off = buf & ~PAGE_MASK;
646 DMA_TO_DEVICE); 545
647 ret = dma_mapping_error(dev->dev, dma_srcs[i]); 546 um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
547 um->len, DMA_TO_DEVICE);
548 srcs[i] = um->addr[i] + src_off;
549 ret = dma_mapping_error(dev->dev, um->addr[i]);
648 if (ret) { 550 if (ret) {
649 unmap_src(dev->dev, dma_srcs, len, i); 551 dmaengine_unmap_put(um);
650 thread_result_add(info, result, 552 result("src mapping error", total_tests,
651 DMATEST_ET_MAP_SRC, 553 src_off, dst_off, len, ret);
652 total_tests, src_off, dst_off,
653 len, ret);
654 failed_tests++; 554 failed_tests++;
655 continue; 555 continue;
656 } 556 }
557 um->to_cnt++;
657 } 558 }
658 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ 559 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
560 dsts = &um->addr[src_cnt];
659 for (i = 0; i < dst_cnt; i++) { 561 for (i = 0; i < dst_cnt; i++) {
660 dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], 562 unsigned long buf = (unsigned long) thread->dsts[i];
661 params->buf_size, 563 struct page *pg = virt_to_page(buf);
662 DMA_BIDIRECTIONAL); 564 unsigned pg_off = buf & ~PAGE_MASK;
663 ret = dma_mapping_error(dev->dev, dma_dsts[i]); 565
566 dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
567 DMA_BIDIRECTIONAL);
568 ret = dma_mapping_error(dev->dev, dsts[i]);
664 if (ret) { 569 if (ret) {
665 unmap_src(dev->dev, dma_srcs, len, src_cnt); 570 dmaengine_unmap_put(um);
666 unmap_dst(dev->dev, dma_dsts, params->buf_size, 571 result("dst mapping error", total_tests,
667 i); 572 src_off, dst_off, len, ret);
668 thread_result_add(info, result,
669 DMATEST_ET_MAP_DST,
670 total_tests, src_off, dst_off,
671 len, ret);
672 failed_tests++; 573 failed_tests++;
673 continue; 574 continue;
674 } 575 }
576 um->bidi_cnt++;
675 } 577 }
676 578
677 if (thread->type == DMA_MEMCPY) 579 if (thread->type == DMA_MEMCPY)
678 tx = dev->device_prep_dma_memcpy(chan, 580 tx = dev->device_prep_dma_memcpy(chan,
679 dma_dsts[0] + dst_off, 581 dsts[0] + dst_off,
680 dma_srcs[0], len, 582 srcs[0], len, flags);
681 flags);
682 else if (thread->type == DMA_XOR) 583 else if (thread->type == DMA_XOR)
683 tx = dev->device_prep_dma_xor(chan, 584 tx = dev->device_prep_dma_xor(chan,
684 dma_dsts[0] + dst_off, 585 dsts[0] + dst_off,
685 dma_srcs, src_cnt, 586 srcs, src_cnt,
686 len, flags); 587 len, flags);
687 else if (thread->type == DMA_PQ) { 588 else if (thread->type == DMA_PQ) {
688 dma_addr_t dma_pq[dst_cnt]; 589 dma_addr_t dma_pq[dst_cnt];
689 590
690 for (i = 0; i < dst_cnt; i++) 591 for (i = 0; i < dst_cnt; i++)
691 dma_pq[i] = dma_dsts[i] + dst_off; 592 dma_pq[i] = dsts[i] + dst_off;
692 tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs, 593 tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
693 src_cnt, pq_coefs, 594 src_cnt, pq_coefs,
694 len, flags); 595 len, flags);
695 } 596 }
696 597
697 if (!tx) { 598 if (!tx) {
698 unmap_src(dev->dev, dma_srcs, len, src_cnt); 599 dmaengine_unmap_put(um);
699 unmap_dst(dev->dev, dma_dsts, params->buf_size, 600 result("prep error", total_tests, src_off,
700 dst_cnt); 601 dst_off, len, ret);
701 thread_result_add(info, result, DMATEST_ET_PREP,
702 total_tests, src_off, dst_off,
703 len, 0);
704 msleep(100); 602 msleep(100);
705 failed_tests++; 603 failed_tests++;
706 continue; 604 continue;
@@ -712,9 +610,9 @@ static int dmatest_func(void *data)
712 cookie = tx->tx_submit(tx); 610 cookie = tx->tx_submit(tx);
713 611
714 if (dma_submit_error(cookie)) { 612 if (dma_submit_error(cookie)) {
715 thread_result_add(info, result, DMATEST_ET_SUBMIT, 613 dmaengine_unmap_put(um);
716 total_tests, src_off, dst_off, 614 result("submit error", total_tests, src_off,
717 len, cookie); 615 dst_off, len, ret);
718 msleep(100); 616 msleep(100);
719 failed_tests++; 617 failed_tests++;
720 continue; 618 continue;
@@ -735,59 +633,59 @@ static int dmatest_func(void *data)
735 * free it this time?" dancing. For now, just 633 * free it this time?" dancing. For now, just
736 * leave it dangling. 634 * leave it dangling.
737 */ 635 */
738 thread_result_add(info, result, DMATEST_ET_TIMEOUT, 636 dmaengine_unmap_put(um);
739 total_tests, src_off, dst_off, 637 result("test timed out", total_tests, src_off, dst_off,
740 len, 0); 638 len, 0);
741 failed_tests++; 639 failed_tests++;
742 continue; 640 continue;
743 } else if (status != DMA_SUCCESS) { 641 } else if (status != DMA_COMPLETE) {
744 enum dmatest_error_type type = (status == DMA_ERROR) ? 642 dmaengine_unmap_put(um);
745 DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS; 643 result(status == DMA_ERROR ?
746 thread_result_add(info, result, type, 644 "completion error status" :
747 total_tests, src_off, dst_off, 645 "completion busy status", total_tests, src_off,
748 len, status); 646 dst_off, len, ret);
749 failed_tests++; 647 failed_tests++;
750 continue; 648 continue;
751 } 649 }
752 650
753 /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ 651 dmaengine_unmap_put(um);
754 unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);
755 652
756 error_count = 0; 653 if (params->noverify) {
654 verbose_result("test passed", total_tests, src_off,
655 dst_off, len, 0);
656 continue;
657 }
757 658
758 pr_debug("%s: verifying source buffer...\n", thread_name); 659 pr_debug("%s: verifying source buffer...\n", current->comm);
759 error_count += verify_result_add(info, result, total_tests, 660 error_count = dmatest_verify(thread->srcs, 0, src_off,
760 src_off, dst_off, len, thread->srcs, -1,
761 0, PATTERN_SRC, true); 661 0, PATTERN_SRC, true);
762 error_count += verify_result_add(info, result, total_tests, 662 error_count += dmatest_verify(thread->srcs, src_off,
763 src_off, dst_off, len, thread->srcs, 0, 663 src_off + len, src_off,
764 src_off, PATTERN_SRC | PATTERN_COPY, true); 664 PATTERN_SRC | PATTERN_COPY, true);
765 error_count += verify_result_add(info, result, total_tests, 665 error_count += dmatest_verify(thread->srcs, src_off + len,
766 src_off, dst_off, len, thread->srcs, 1, 666 params->buf_size, src_off + len,
767 src_off + len, PATTERN_SRC, true); 667 PATTERN_SRC, true);
768 668
769 pr_debug("%s: verifying dest buffer...\n", thread_name); 669 pr_debug("%s: verifying dest buffer...\n", current->comm);
770 error_count += verify_result_add(info, result, total_tests, 670 error_count += dmatest_verify(thread->dsts, 0, dst_off,
771 src_off, dst_off, len, thread->dsts, -1,
772 0, PATTERN_DST, false); 671 0, PATTERN_DST, false);
773 error_count += verify_result_add(info, result, total_tests, 672 error_count += dmatest_verify(thread->dsts, dst_off,
774 src_off, dst_off, len, thread->dsts, 0, 673 dst_off + len, src_off,
775 src_off, PATTERN_SRC | PATTERN_COPY, false); 674 PATTERN_SRC | PATTERN_COPY, false);
776 error_count += verify_result_add(info, result, total_tests, 675 error_count += dmatest_verify(thread->dsts, dst_off + len,
777 src_off, dst_off, len, thread->dsts, 1, 676 params->buf_size, dst_off + len,
778 dst_off + len, PATTERN_DST, false); 677 PATTERN_DST, false);
779 678
780 if (error_count) { 679 if (error_count) {
781 thread_result_add(info, result, DMATEST_ET_VERIFY, 680 result("data error", total_tests, src_off, dst_off,
782 total_tests, src_off, dst_off, 681 len, error_count);
783 len, error_count);
784 failed_tests++; 682 failed_tests++;
785 } else { 683 } else {
786 thread_result_add(info, result, DMATEST_ET_OK, 684 verbose_result("test passed", total_tests, src_off,
787 total_tests, src_off, dst_off, 685 dst_off, len, 0);
788 len, 0);
789 } 686 }
790 } 687 }
688 runtime = ktime_us_delta(ktime_get(), ktime);
791 689
792 ret = 0; 690 ret = 0;
793 for (i = 0; thread->dsts[i]; i++) 691 for (i = 0; thread->dsts[i]; i++)
@@ -802,20 +700,17 @@ err_srcbuf:
802err_srcs: 700err_srcs:
803 kfree(pq_coefs); 701 kfree(pq_coefs);
804err_thread_type: 702err_thread_type:
805 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", 703 pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n",
806 thread_name, total_tests, failed_tests, ret); 704 current->comm, total_tests, failed_tests,
705 dmatest_persec(runtime, total_tests),
706 dmatest_KBs(runtime, total_len), ret);
807 707
808 /* terminate all transfers on specified channels */ 708 /* terminate all transfers on specified channels */
809 if (ret) 709 if (ret)
810 dmaengine_terminate_all(chan); 710 dmaengine_terminate_all(chan);
811 711
812 thread->done = true; 712 thread->done = true;
813 713 wake_up(&thread_wait);
814 if (params->iterations > 0)
815 while (!kthread_should_stop()) {
816 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
817 interruptible_sleep_on(&wait_dmatest_exit);
818 }
819 714
820 return ret; 715 return ret;
821} 716}
@@ -828,9 +723,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
828 723
829 list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { 724 list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
830 ret = kthread_stop(thread->task); 725 ret = kthread_stop(thread->task);
831 pr_debug("dmatest: thread %s exited with status %d\n", 726 pr_debug("thread %s exited with status %d\n",
832 thread->task->comm, ret); 727 thread->task->comm, ret);
833 list_del(&thread->node); 728 list_del(&thread->node);
729 put_task_struct(thread->task);
834 kfree(thread); 730 kfree(thread);
835 } 731 }
836 732
@@ -861,27 +757,27 @@ static int dmatest_add_threads(struct dmatest_info *info,
861 for (i = 0; i < params->threads_per_chan; i++) { 757 for (i = 0; i < params->threads_per_chan; i++) {
862 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); 758 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
863 if (!thread) { 759 if (!thread) {
864 pr_warning("dmatest: No memory for %s-%s%u\n", 760 pr_warn("No memory for %s-%s%u\n",
865 dma_chan_name(chan), op, i); 761 dma_chan_name(chan), op, i);
866
867 break; 762 break;
868 } 763 }
869 thread->info = info; 764 thread->info = info;
870 thread->chan = dtc->chan; 765 thread->chan = dtc->chan;
871 thread->type = type; 766 thread->type = type;
872 smp_wmb(); 767 smp_wmb();
873 thread->task = kthread_run(dmatest_func, thread, "%s-%s%u", 768 thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
874 dma_chan_name(chan), op, i); 769 dma_chan_name(chan), op, i);
875 if (IS_ERR(thread->task)) { 770 if (IS_ERR(thread->task)) {
876 pr_warning("dmatest: Failed to run thread %s-%s%u\n", 771 pr_warn("Failed to create thread %s-%s%u\n",
877 dma_chan_name(chan), op, i); 772 dma_chan_name(chan), op, i);
878 kfree(thread); 773 kfree(thread);
879 break; 774 break;
880 } 775 }
881 776
882 /* srcbuf and dstbuf are allocated by the thread itself */ 777 /* srcbuf and dstbuf are allocated by the thread itself */
883 778 get_task_struct(thread->task);
884 list_add_tail(&thread->node, &dtc->threads); 779 list_add_tail(&thread->node, &dtc->threads);
780 wake_up_process(thread->task);
885 } 781 }
886 782
887 return i; 783 return i;
@@ -897,7 +793,7 @@ static int dmatest_add_channel(struct dmatest_info *info,
897 793
898 dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); 794 dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
899 if (!dtc) { 795 if (!dtc) {
900 pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan)); 796 pr_warn("No memory for %s\n", dma_chan_name(chan));
901 return -ENOMEM; 797 return -ENOMEM;
902 } 798 }
903 799
@@ -917,7 +813,7 @@ static int dmatest_add_channel(struct dmatest_info *info,
917 thread_count += cnt > 0 ? cnt : 0; 813 thread_count += cnt > 0 ? cnt : 0;
918 } 814 }
919 815
920 pr_info("dmatest: Started %u threads using %s\n", 816 pr_info("Started %u threads using %s\n",
921 thread_count, dma_chan_name(chan)); 817 thread_count, dma_chan_name(chan));
922 818
923 list_add_tail(&dtc->node, &info->channels); 819 list_add_tail(&dtc->node, &info->channels);
@@ -937,20 +833,20 @@ static bool filter(struct dma_chan *chan, void *param)
937 return true; 833 return true;
938} 834}
939 835
940static int __run_threaded_test(struct dmatest_info *info) 836static void request_channels(struct dmatest_info *info,
837 enum dma_transaction_type type)
941{ 838{
942 dma_cap_mask_t mask; 839 dma_cap_mask_t mask;
943 struct dma_chan *chan;
944 struct dmatest_params *params = &info->params;
945 int err = 0;
946 840
947 dma_cap_zero(mask); 841 dma_cap_zero(mask);
948 dma_cap_set(DMA_MEMCPY, mask); 842 dma_cap_set(type, mask);
949 for (;;) { 843 for (;;) {
844 struct dmatest_params *params = &info->params;
845 struct dma_chan *chan;
846
950 chan = dma_request_channel(mask, filter, params); 847 chan = dma_request_channel(mask, filter, params);
951 if (chan) { 848 if (chan) {
952 err = dmatest_add_channel(info, chan); 849 if (dmatest_add_channel(info, chan)) {
953 if (err) {
954 dma_release_channel(chan); 850 dma_release_channel(chan);
955 break; /* add_channel failed, punt */ 851 break; /* add_channel failed, punt */
956 } 852 }
@@ -960,22 +856,30 @@ static int __run_threaded_test(struct dmatest_info *info)
960 info->nr_channels >= params->max_channels) 856 info->nr_channels >= params->max_channels)
961 break; /* we have all we need */ 857 break; /* we have all we need */
962 } 858 }
963 return err;
964} 859}
965 860
966#ifndef MODULE 861static void run_threaded_test(struct dmatest_info *info)
967static int run_threaded_test(struct dmatest_info *info)
968{ 862{
969 int ret; 863 struct dmatest_params *params = &info->params;
970 864
971 mutex_lock(&info->lock); 865 /* Copy test parameters */
972 ret = __run_threaded_test(info); 866 params->buf_size = test_buf_size;
973 mutex_unlock(&info->lock); 867 strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
974 return ret; 868 strlcpy(params->device, strim(test_device), sizeof(params->device));
869 params->threads_per_chan = threads_per_chan;
870 params->max_channels = max_channels;
871 params->iterations = iterations;
872 params->xor_sources = xor_sources;
873 params->pq_sources = pq_sources;
874 params->timeout = timeout;
875 params->noverify = noverify;
876
877 request_channels(info, DMA_MEMCPY);
878 request_channels(info, DMA_XOR);
879 request_channels(info, DMA_PQ);
975} 880}
976#endif
977 881
978static void __stop_threaded_test(struct dmatest_info *info) 882static void stop_threaded_test(struct dmatest_info *info)
979{ 883{
980 struct dmatest_chan *dtc, *_dtc; 884 struct dmatest_chan *dtc, *_dtc;
981 struct dma_chan *chan; 885 struct dma_chan *chan;
@@ -984,203 +888,86 @@ static void __stop_threaded_test(struct dmatest_info *info)
984 list_del(&dtc->node); 888 list_del(&dtc->node);
985 chan = dtc->chan; 889 chan = dtc->chan;
986 dmatest_cleanup_channel(dtc); 890 dmatest_cleanup_channel(dtc);
987 pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan)); 891 pr_debug("dropped channel %s\n", dma_chan_name(chan));
988 dma_release_channel(chan); 892 dma_release_channel(chan);
989 } 893 }
990 894
991 info->nr_channels = 0; 895 info->nr_channels = 0;
992} 896}
993 897
994static void stop_threaded_test(struct dmatest_info *info) 898static void restart_threaded_test(struct dmatest_info *info, bool run)
995{ 899{
996 mutex_lock(&info->lock); 900 /* we might be called early to set run=, defer running until all
997 __stop_threaded_test(info); 901 * parameters have been evaluated
998 mutex_unlock(&info->lock); 902 */
999} 903 if (!info->did_init)
1000 904 return;
1001static int __restart_threaded_test(struct dmatest_info *info, bool run)
1002{
1003 struct dmatest_params *params = &info->params;
1004 905
1005 /* Stop any running test first */ 906 /* Stop any running test first */
1006 __stop_threaded_test(info); 907 stop_threaded_test(info);
1007
1008 if (run == false)
1009 return 0;
1010
1011 /* Clear results from previous run */
1012 result_free(info, NULL);
1013
1014 /* Copy test parameters */
1015 params->buf_size = test_buf_size;
1016 strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
1017 strlcpy(params->device, strim(test_device), sizeof(params->device));
1018 params->threads_per_chan = threads_per_chan;
1019 params->max_channels = max_channels;
1020 params->iterations = iterations;
1021 params->xor_sources = xor_sources;
1022 params->pq_sources = pq_sources;
1023 params->timeout = timeout;
1024 908
1025 /* Run test with new parameters */ 909 /* Run test with new parameters */
1026 return __run_threaded_test(info); 910 run_threaded_test(info);
1027}
1028
1029static bool __is_threaded_test_run(struct dmatest_info *info)
1030{
1031 struct dmatest_chan *dtc;
1032
1033 list_for_each_entry(dtc, &info->channels, node) {
1034 struct dmatest_thread *thread;
1035
1036 list_for_each_entry(thread, &dtc->threads, node) {
1037 if (!thread->done)
1038 return true;
1039 }
1040 }
1041
1042 return false;
1043} 911}
1044 912
1045static ssize_t dtf_read_run(struct file *file, char __user *user_buf, 913static int dmatest_run_get(char *val, const struct kernel_param *kp)
1046 size_t count, loff_t *ppos)
1047{ 914{
1048 struct dmatest_info *info = file->private_data; 915 struct dmatest_info *info = &test_info;
1049 char buf[3];
1050 916
1051 mutex_lock(&info->lock); 917 mutex_lock(&info->lock);
1052 918 if (is_threaded_test_run(info)) {
1053 if (__is_threaded_test_run(info)) { 919 dmatest_run = true;
1054 buf[0] = 'Y';
1055 } else { 920 } else {
1056 __stop_threaded_test(info); 921 stop_threaded_test(info);
1057 buf[0] = 'N'; 922 dmatest_run = false;
1058 } 923 }
1059
1060 mutex_unlock(&info->lock); 924 mutex_unlock(&info->lock);
1061 buf[1] = '\n';
1062 buf[2] = 0x00;
1063 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
1064}
1065
1066static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
1067 size_t count, loff_t *ppos)
1068{
1069 struct dmatest_info *info = file->private_data;
1070 char buf[16];
1071 bool bv;
1072 int ret = 0;
1073 925
1074 if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1)))) 926 return param_get_bool(val, kp);
1075 return -EFAULT;
1076
1077 if (strtobool(buf, &bv) == 0) {
1078 mutex_lock(&info->lock);
1079
1080 if (__is_threaded_test_run(info))
1081 ret = -EBUSY;
1082 else
1083 ret = __restart_threaded_test(info, bv);
1084
1085 mutex_unlock(&info->lock);
1086 }
1087
1088 return ret ? ret : count;
1089} 927}
1090 928
1091static const struct file_operations dtf_run_fops = { 929static int dmatest_run_set(const char *val, const struct kernel_param *kp)
1092 .read = dtf_read_run,
1093 .write = dtf_write_run,
1094 .open = simple_open,
1095 .llseek = default_llseek,
1096};
1097
1098static int dtf_results_show(struct seq_file *sf, void *data)
1099{ 930{
1100 struct dmatest_info *info = sf->private; 931 struct dmatest_info *info = &test_info;
1101 struct dmatest_result *result; 932 int ret;
1102 struct dmatest_thread_result *tr;
1103 unsigned int i;
1104 933
1105 mutex_lock(&info->results_lock); 934 mutex_lock(&info->lock);
1106 list_for_each_entry(result, &info->results, node) { 935 ret = param_set_bool(val, kp);
1107 list_for_each_entry(tr, &result->results, node) { 936 if (ret) {
1108 seq_printf(sf, "%s\n", 937 mutex_unlock(&info->lock);
1109 thread_result_get(result->name, tr)); 938 return ret;
1110 if (tr->type == DMATEST_ET_VERIFY_BUF) {
1111 for (i = 0; i < tr->vr->error_count; i++) {
1112 seq_printf(sf, "\t%s\n",
1113 verify_result_get_one(tr->vr, i));
1114 }
1115 }
1116 }
1117 } 939 }
1118 940
1119 mutex_unlock(&info->results_lock); 941 if (is_threaded_test_run(info))
1120 return 0; 942 ret = -EBUSY;
1121} 943 else if (dmatest_run)
1122 944 restart_threaded_test(info, dmatest_run);
1123static int dtf_results_open(struct inode *inode, struct file *file)
1124{
1125 return single_open(file, dtf_results_show, inode->i_private);
1126}
1127
1128static const struct file_operations dtf_results_fops = {
1129 .open = dtf_results_open,
1130 .read = seq_read,
1131 .llseek = seq_lseek,
1132 .release = single_release,
1133};
1134
1135static int dmatest_register_dbgfs(struct dmatest_info *info)
1136{
1137 struct dentry *d;
1138
1139 d = debugfs_create_dir("dmatest", NULL);
1140 if (IS_ERR(d))
1141 return PTR_ERR(d);
1142 if (!d)
1143 goto err_root;
1144 945
1145 info->root = d; 946 mutex_unlock(&info->lock);
1146
1147 /* Run or stop threaded test */
1148 debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info,
1149 &dtf_run_fops);
1150
1151 /* Results of test in progress */
1152 debugfs_create_file("results", S_IRUGO, info->root, info,
1153 &dtf_results_fops);
1154
1155 return 0;
1156 947
1157err_root: 948 return ret;
1158 pr_err("dmatest: Failed to initialize debugfs\n");
1159 return -ENOMEM;
1160} 949}
1161 950
1162static int __init dmatest_init(void) 951static int __init dmatest_init(void)
1163{ 952{
1164 struct dmatest_info *info = &test_info; 953 struct dmatest_info *info = &test_info;
1165 int ret; 954 struct dmatest_params *params = &info->params;
1166
1167 memset(info, 0, sizeof(*info));
1168 955
1169 mutex_init(&info->lock); 956 if (dmatest_run) {
1170 INIT_LIST_HEAD(&info->channels); 957 mutex_lock(&info->lock);
958 run_threaded_test(info);
959 mutex_unlock(&info->lock);
960 }
1171 961
1172 mutex_init(&info->results_lock); 962 if (params->iterations && wait)
1173 INIT_LIST_HEAD(&info->results); 963 wait_event(thread_wait, !is_threaded_test_run(info));
1174 964
1175 ret = dmatest_register_dbgfs(info); 965 /* module parameters are stable, inittime tests are started,
1176 if (ret) 966 * let userspace take over 'run' control
1177 return ret; 967 */
968 info->did_init = true;
1178 969
1179#ifdef MODULE
1180 return 0; 970 return 0;
1181#else
1182 return run_threaded_test(info);
1183#endif
1184} 971}
1185/* when compiled-in wait for drivers to load first */ 972/* when compiled-in wait for drivers to load first */
1186late_initcall(dmatest_init); 973late_initcall(dmatest_init);
@@ -1189,9 +976,9 @@ static void __exit dmatest_exit(void)
1189{ 976{
1190 struct dmatest_info *info = &test_info; 977 struct dmatest_info *info = &test_info;
1191 978
1192 debugfs_remove_recursive(info->root); 979 mutex_lock(&info->lock);
1193 stop_threaded_test(info); 980 stop_threaded_test(info);
1194 result_free(info, NULL); 981 mutex_unlock(&info->lock);
1195} 982}
1196module_exit(dmatest_exit); 983module_exit(dmatest_exit);
1197 984
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 89eb89f22284..7516be4677cf 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -85,10 +85,6 @@ static struct device *chan2dev(struct dma_chan *chan)
85{ 85{
86 return &chan->dev->device; 86 return &chan->dev->device;
87} 87}
88static struct device *chan2parent(struct dma_chan *chan)
89{
90 return chan->dev->device.parent;
91}
92 88
93static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) 89static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
94{ 90{
@@ -311,26 +307,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
311 list_splice_init(&desc->tx_list, &dwc->free_list); 307 list_splice_init(&desc->tx_list, &dwc->free_list);
312 list_move(&desc->desc_node, &dwc->free_list); 308 list_move(&desc->desc_node, &dwc->free_list);
313 309
314 if (!is_slave_direction(dwc->direction)) { 310 dma_descriptor_unmap(txd);
315 struct device *parent = chan2parent(&dwc->chan);
316 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
317 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
318 dma_unmap_single(parent, desc->lli.dar,
319 desc->total_len, DMA_FROM_DEVICE);
320 else
321 dma_unmap_page(parent, desc->lli.dar,
322 desc->total_len, DMA_FROM_DEVICE);
323 }
324 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
325 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
326 dma_unmap_single(parent, desc->lli.sar,
327 desc->total_len, DMA_TO_DEVICE);
328 else
329 dma_unmap_page(parent, desc->lli.sar,
330 desc->total_len, DMA_TO_DEVICE);
331 }
332 }
333
334 spin_unlock_irqrestore(&dwc->lock, flags); 311 spin_unlock_irqrestore(&dwc->lock, flags);
335 312
336 if (callback) 313 if (callback)
@@ -1098,13 +1075,13 @@ dwc_tx_status(struct dma_chan *chan,
1098 enum dma_status ret; 1075 enum dma_status ret;
1099 1076
1100 ret = dma_cookie_status(chan, cookie, txstate); 1077 ret = dma_cookie_status(chan, cookie, txstate);
1101 if (ret == DMA_SUCCESS) 1078 if (ret == DMA_COMPLETE)
1102 return ret; 1079 return ret;
1103 1080
1104 dwc_scan_descriptors(to_dw_dma(chan->device), dwc); 1081 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1105 1082
1106 ret = dma_cookie_status(chan, cookie, txstate); 1083 ret = dma_cookie_status(chan, cookie, txstate);
1107 if (ret != DMA_SUCCESS) 1084 if (ret != DMA_COMPLETE)
1108 dma_set_residue(txstate, dwc_get_residue(dwc)); 1085 dma_set_residue(txstate, dwc_get_residue(dwc));
1109 1086
1110 if (dwc->paused && ret == DMA_IN_PROGRESS) 1087 if (dwc->paused && ret == DMA_IN_PROGRESS)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index bef8a368c8dd..2539ea0cbc63 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -46,14 +46,21 @@
46#define EDMA_CHANS 64 46#define EDMA_CHANS 64
47#endif /* CONFIG_ARCH_DAVINCI_DA8XX */ 47#endif /* CONFIG_ARCH_DAVINCI_DA8XX */
48 48
49/* Max of 16 segments per channel to conserve PaRAM slots */ 49/*
50#define MAX_NR_SG 16 50 * Max of 20 segments per channel to conserve PaRAM slots
51 * Also note that MAX_NR_SG should be atleast the no.of periods
52 * that are required for ASoC, otherwise DMA prep calls will
53 * fail. Today davinci-pcm is the only user of this driver and
54 * requires atleast 17 slots, so we setup the default to 20.
55 */
56#define MAX_NR_SG 20
51#define EDMA_MAX_SLOTS MAX_NR_SG 57#define EDMA_MAX_SLOTS MAX_NR_SG
52#define EDMA_DESCRIPTORS 16 58#define EDMA_DESCRIPTORS 16
53 59
54struct edma_desc { 60struct edma_desc {
55 struct virt_dma_desc vdesc; 61 struct virt_dma_desc vdesc;
56 struct list_head node; 62 struct list_head node;
63 int cyclic;
57 int absync; 64 int absync;
58 int pset_nr; 65 int pset_nr;
59 int processed; 66 int processed;
@@ -167,8 +174,13 @@ static void edma_execute(struct edma_chan *echan)
167 * then setup a link to the dummy slot, this results in all future 174 * then setup a link to the dummy slot, this results in all future
168 * events being absorbed and that's OK because we're done 175 * events being absorbed and that's OK because we're done
169 */ 176 */
170 if (edesc->processed == edesc->pset_nr) 177 if (edesc->processed == edesc->pset_nr) {
171 edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot); 178 if (edesc->cyclic)
179 edma_link(echan->slot[nslots-1], echan->slot[1]);
180 else
181 edma_link(echan->slot[nslots-1],
182 echan->ecc->dummy_slot);
183 }
172 184
173 edma_resume(echan->ch_num); 185 edma_resume(echan->ch_num);
174 186
@@ -250,6 +262,117 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
250 return ret; 262 return ret;
251} 263}
252 264
265/*
266 * A PaRAM set configuration abstraction used by other modes
267 * @chan: Channel who's PaRAM set we're configuring
268 * @pset: PaRAM set to initialize and setup.
269 * @src_addr: Source address of the DMA
270 * @dst_addr: Destination address of the DMA
271 * @burst: In units of dev_width, how much to send
272 * @dev_width: How much is the dev_width
273 * @dma_length: Total length of the DMA transfer
274 * @direction: Direction of the transfer
275 */
276static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
277 dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
278 enum dma_slave_buswidth dev_width, unsigned int dma_length,
279 enum dma_transfer_direction direction)
280{
281 struct edma_chan *echan = to_edma_chan(chan);
282 struct device *dev = chan->device->dev;
283 int acnt, bcnt, ccnt, cidx;
284 int src_bidx, dst_bidx, src_cidx, dst_cidx;
285 int absync;
286
287 acnt = dev_width;
288 /*
289 * If the maxburst is equal to the fifo width, use
290 * A-synced transfers. This allows for large contiguous
291 * buffer transfers using only one PaRAM set.
292 */
293 if (burst == 1) {
294 /*
295 * For the A-sync case, bcnt and ccnt are the remainder
296 * and quotient respectively of the division of:
297 * (dma_length / acnt) by (SZ_64K -1). This is so
298 * that in case bcnt over flows, we have ccnt to use.
299 * Note: In A-sync tranfer only, bcntrld is used, but it
300 * only applies for sg_dma_len(sg) >= SZ_64K.
301 * In this case, the best way adopted is- bccnt for the
302 * first frame will be the remainder below. Then for
303 * every successive frame, bcnt will be SZ_64K-1. This
304 * is assured as bcntrld = 0xffff in end of function.
305 */
306 absync = false;
307 ccnt = dma_length / acnt / (SZ_64K - 1);
308 bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
309 /*
310 * If bcnt is non-zero, we have a remainder and hence an
311 * extra frame to transfer, so increment ccnt.
312 */
313 if (bcnt)
314 ccnt++;
315 else
316 bcnt = SZ_64K - 1;
317 cidx = acnt;
318 } else {
319 /*
320 * If maxburst is greater than the fifo address_width,
321 * use AB-synced transfers where A count is the fifo
322 * address_width and B count is the maxburst. In this
323 * case, we are limited to transfers of C count frames
324 * of (address_width * maxburst) where C count is limited
325 * to SZ_64K-1. This places an upper bound on the length
326 * of an SG segment that can be handled.
327 */
328 absync = true;
329 bcnt = burst;
330 ccnt = dma_length / (acnt * bcnt);
331 if (ccnt > (SZ_64K - 1)) {
332 dev_err(dev, "Exceeded max SG segment size\n");
333 return -EINVAL;
334 }
335 cidx = acnt * bcnt;
336 }
337
338 if (direction == DMA_MEM_TO_DEV) {
339 src_bidx = acnt;
340 src_cidx = cidx;
341 dst_bidx = 0;
342 dst_cidx = 0;
343 } else if (direction == DMA_DEV_TO_MEM) {
344 src_bidx = 0;
345 src_cidx = 0;
346 dst_bidx = acnt;
347 dst_cidx = cidx;
348 } else {
349 dev_err(dev, "%s: direction not implemented yet\n", __func__);
350 return -EINVAL;
351 }
352
353 pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
354 /* Configure A or AB synchronized transfers */
355 if (absync)
356 pset->opt |= SYNCDIM;
357
358 pset->src = src_addr;
359 pset->dst = dst_addr;
360
361 pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
362 pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
363
364 pset->a_b_cnt = bcnt << 16 | acnt;
365 pset->ccnt = ccnt;
366 /*
367 * Only time when (bcntrld) auto reload is required is for
368 * A-sync case, and in this case, a requirement of reload value
369 * of SZ_64K-1 only is assured. 'link' is initially set to NULL
370 * and then later will be populated by edma_execute.
371 */
372 pset->link_bcntrld = 0xffffffff;
373 return absync;
374}
375
253static struct dma_async_tx_descriptor *edma_prep_slave_sg( 376static struct dma_async_tx_descriptor *edma_prep_slave_sg(
254 struct dma_chan *chan, struct scatterlist *sgl, 377 struct dma_chan *chan, struct scatterlist *sgl,
255 unsigned int sg_len, enum dma_transfer_direction direction, 378 unsigned int sg_len, enum dma_transfer_direction direction,
@@ -258,23 +381,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
258 struct edma_chan *echan = to_edma_chan(chan); 381 struct edma_chan *echan = to_edma_chan(chan);
259 struct device *dev = chan->device->dev; 382 struct device *dev = chan->device->dev;
260 struct edma_desc *edesc; 383 struct edma_desc *edesc;
261 dma_addr_t dev_addr; 384 dma_addr_t src_addr = 0, dst_addr = 0;
262 enum dma_slave_buswidth dev_width; 385 enum dma_slave_buswidth dev_width;
263 u32 burst; 386 u32 burst;
264 struct scatterlist *sg; 387 struct scatterlist *sg;
265 int acnt, bcnt, ccnt, src, dst, cidx; 388 int i, nslots, ret;
266 int src_bidx, dst_bidx, src_cidx, dst_cidx;
267 int i, nslots;
268 389
269 if (unlikely(!echan || !sgl || !sg_len)) 390 if (unlikely(!echan || !sgl || !sg_len))
270 return NULL; 391 return NULL;
271 392
272 if (direction == DMA_DEV_TO_MEM) { 393 if (direction == DMA_DEV_TO_MEM) {
273 dev_addr = echan->cfg.src_addr; 394 src_addr = echan->cfg.src_addr;
274 dev_width = echan->cfg.src_addr_width; 395 dev_width = echan->cfg.src_addr_width;
275 burst = echan->cfg.src_maxburst; 396 burst = echan->cfg.src_maxburst;
276 } else if (direction == DMA_MEM_TO_DEV) { 397 } else if (direction == DMA_MEM_TO_DEV) {
277 dev_addr = echan->cfg.dst_addr; 398 dst_addr = echan->cfg.dst_addr;
278 dev_width = echan->cfg.dst_addr_width; 399 dev_width = echan->cfg.dst_addr_width;
279 burst = echan->cfg.dst_maxburst; 400 burst = echan->cfg.dst_maxburst;
280 } else { 401 } else {
@@ -307,7 +428,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
307 if (echan->slot[i] < 0) { 428 if (echan->slot[i] < 0) {
308 kfree(edesc); 429 kfree(edesc);
309 dev_err(dev, "Failed to allocate slot\n"); 430 dev_err(dev, "Failed to allocate slot\n");
310 kfree(edesc);
311 return NULL; 431 return NULL;
312 } 432 }
313 } 433 }
@@ -315,64 +435,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
315 435
316 /* Configure PaRAM sets for each SG */ 436 /* Configure PaRAM sets for each SG */
317 for_each_sg(sgl, sg, sg_len, i) { 437 for_each_sg(sgl, sg, sg_len, i) {
318 438 /* Get address for each SG */
319 acnt = dev_width; 439 if (direction == DMA_DEV_TO_MEM)
320 440 dst_addr = sg_dma_address(sg);
321 /* 441 else
322 * If the maxburst is equal to the fifo width, use 442 src_addr = sg_dma_address(sg);
323 * A-synced transfers. This allows for large contiguous 443
324 * buffer transfers using only one PaRAM set. 444 ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
325 */ 445 dst_addr, burst, dev_width,
326 if (burst == 1) { 446 sg_dma_len(sg), direction);
327 edesc->absync = false; 447 if (ret < 0) {
328 ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1); 448 kfree(edesc);
329 bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1); 449 return NULL;
330 if (bcnt)
331 ccnt++;
332 else
333 bcnt = SZ_64K - 1;
334 cidx = acnt;
335 /*
336 * If maxburst is greater than the fifo address_width,
337 * use AB-synced transfers where A count is the fifo
338 * address_width and B count is the maxburst. In this
339 * case, we are limited to transfers of C count frames
340 * of (address_width * maxburst) where C count is limited
341 * to SZ_64K-1. This places an upper bound on the length
342 * of an SG segment that can be handled.
343 */
344 } else {
345 edesc->absync = true;
346 bcnt = burst;
347 ccnt = sg_dma_len(sg) / (acnt * bcnt);
348 if (ccnt > (SZ_64K - 1)) {
349 dev_err(dev, "Exceeded max SG segment size\n");
350 kfree(edesc);
351 return NULL;
352 }
353 cidx = acnt * bcnt;
354 } 450 }
355 451
356 if (direction == DMA_MEM_TO_DEV) { 452 edesc->absync = ret;
357 src = sg_dma_address(sg);
358 dst = dev_addr;
359 src_bidx = acnt;
360 src_cidx = cidx;
361 dst_bidx = 0;
362 dst_cidx = 0;
363 } else {
364 src = dev_addr;
365 dst = sg_dma_address(sg);
366 src_bidx = 0;
367 src_cidx = 0;
368 dst_bidx = acnt;
369 dst_cidx = cidx;
370 }
371
372 edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
373 /* Configure A or AB synchronized transfers */
374 if (edesc->absync)
375 edesc->pset[i].opt |= SYNCDIM;
376 453
377 /* If this is the last in a current SG set of transactions, 454 /* If this is the last in a current SG set of transactions,
378 enable interrupts so that next set is processed */ 455 enable interrupts so that next set is processed */
@@ -382,17 +459,138 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
382 /* If this is the last set, enable completion interrupt flag */ 459 /* If this is the last set, enable completion interrupt flag */
383 if (i == sg_len - 1) 460 if (i == sg_len - 1)
384 edesc->pset[i].opt |= TCINTEN; 461 edesc->pset[i].opt |= TCINTEN;
462 }
385 463
386 edesc->pset[i].src = src; 464 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
387 edesc->pset[i].dst = dst; 465}
388 466
389 edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx; 467static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
390 edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx; 468 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
469 size_t period_len, enum dma_transfer_direction direction,
470 unsigned long tx_flags, void *context)
471{
472 struct edma_chan *echan = to_edma_chan(chan);
473 struct device *dev = chan->device->dev;
474 struct edma_desc *edesc;
475 dma_addr_t src_addr, dst_addr;
476 enum dma_slave_buswidth dev_width;
477 u32 burst;
478 int i, ret, nslots;
479
480 if (unlikely(!echan || !buf_len || !period_len))
481 return NULL;
482
483 if (direction == DMA_DEV_TO_MEM) {
484 src_addr = echan->cfg.src_addr;
485 dst_addr = buf_addr;
486 dev_width = echan->cfg.src_addr_width;
487 burst = echan->cfg.src_maxburst;
488 } else if (direction == DMA_MEM_TO_DEV) {
489 src_addr = buf_addr;
490 dst_addr = echan->cfg.dst_addr;
491 dev_width = echan->cfg.dst_addr_width;
492 burst = echan->cfg.dst_maxburst;
493 } else {
494 dev_err(dev, "%s: bad direction?\n", __func__);
495 return NULL;
496 }
497
498 if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
499 dev_err(dev, "Undefined slave buswidth\n");
500 return NULL;
501 }
502
503 if (unlikely(buf_len % period_len)) {
504 dev_err(dev, "Period should be multiple of Buffer length\n");
505 return NULL;
506 }
507
508 nslots = (buf_len / period_len) + 1;
509
510 /*
511 * Cyclic DMA users such as audio cannot tolerate delays introduced
512 * by cases where the number of periods is more than the maximum
513 * number of SGs the EDMA driver can handle at a time. For DMA types
514 * such as Slave SGs, such delays are tolerable and synchronized,
515 * but the synchronization is difficult to achieve with Cyclic and
516 * cannot be guaranteed, so we error out early.
517 */
518 if (nslots > MAX_NR_SG)
519 return NULL;
520
521 edesc = kzalloc(sizeof(*edesc) + nslots *
522 sizeof(edesc->pset[0]), GFP_ATOMIC);
523 if (!edesc) {
524 dev_dbg(dev, "Failed to allocate a descriptor\n");
525 return NULL;
526 }
527
528 edesc->cyclic = 1;
529 edesc->pset_nr = nslots;
530
531 dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots);
532 dev_dbg(dev, "%s: period_len=%d\n", __func__, period_len);
533 dev_dbg(dev, "%s: buf_len=%d\n", __func__, buf_len);
534
535 for (i = 0; i < nslots; i++) {
536 /* Allocate a PaRAM slot, if needed */
537 if (echan->slot[i] < 0) {
538 echan->slot[i] =
539 edma_alloc_slot(EDMA_CTLR(echan->ch_num),
540 EDMA_SLOT_ANY);
541 if (echan->slot[i] < 0) {
542 dev_err(dev, "Failed to allocate slot\n");
543 return NULL;
544 }
545 }
546
547 if (i == nslots - 1) {
548 memcpy(&edesc->pset[i], &edesc->pset[0],
549 sizeof(edesc->pset[0]));
550 break;
551 }
552
553 ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
554 dst_addr, burst, dev_width, period_len,
555 direction);
556 if (ret < 0)
557 return NULL;
391 558
392 edesc->pset[i].a_b_cnt = bcnt << 16 | acnt; 559 if (direction == DMA_DEV_TO_MEM)
393 edesc->pset[i].ccnt = ccnt; 560 dst_addr += period_len;
394 edesc->pset[i].link_bcntrld = 0xffffffff; 561 else
562 src_addr += period_len;
395 563
564 dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
565 dev_dbg(dev,
566 "\n pset[%d]:\n"
567 " chnum\t%d\n"
568 " slot\t%d\n"
569 " opt\t%08x\n"
570 " src\t%08x\n"
571 " dst\t%08x\n"
572 " abcnt\t%08x\n"
573 " ccnt\t%08x\n"
574 " bidx\t%08x\n"
575 " cidx\t%08x\n"
576 " lkrld\t%08x\n",
577 i, echan->ch_num, echan->slot[i],
578 edesc->pset[i].opt,
579 edesc->pset[i].src,
580 edesc->pset[i].dst,
581 edesc->pset[i].a_b_cnt,
582 edesc->pset[i].ccnt,
583 edesc->pset[i].src_dst_bidx,
584 edesc->pset[i].src_dst_cidx,
585 edesc->pset[i].link_bcntrld);
586
587 edesc->absync = ret;
588
589 /*
590 * Enable interrupts for every period because callback
591 * has to be called for every period.
592 */
593 edesc->pset[i].opt |= TCINTEN;
396 } 594 }
397 595
398 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); 596 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
@@ -406,30 +604,34 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
406 unsigned long flags; 604 unsigned long flags;
407 struct edmacc_param p; 605 struct edmacc_param p;
408 606
409 /* Pause the channel */ 607 edesc = echan->edesc;
410 edma_pause(echan->ch_num); 608
609 /* Pause the channel for non-cyclic */
610 if (!edesc || (edesc && !edesc->cyclic))
611 edma_pause(echan->ch_num);
411 612
412 switch (ch_status) { 613 switch (ch_status) {
413 case DMA_COMPLETE: 614 case EDMA_DMA_COMPLETE:
414 spin_lock_irqsave(&echan->vchan.lock, flags); 615 spin_lock_irqsave(&echan->vchan.lock, flags);
415 616
416 edesc = echan->edesc;
417 if (edesc) { 617 if (edesc) {
418 if (edesc->processed == edesc->pset_nr) { 618 if (edesc->cyclic) {
619 vchan_cyclic_callback(&edesc->vdesc);
620 } else if (edesc->processed == edesc->pset_nr) {
419 dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num); 621 dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
420 edma_stop(echan->ch_num); 622 edma_stop(echan->ch_num);
421 vchan_cookie_complete(&edesc->vdesc); 623 vchan_cookie_complete(&edesc->vdesc);
624 edma_execute(echan);
422 } else { 625 } else {
423 dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num); 626 dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
627 edma_execute(echan);
424 } 628 }
425
426 edma_execute(echan);
427 } 629 }
428 630
429 spin_unlock_irqrestore(&echan->vchan.lock, flags); 631 spin_unlock_irqrestore(&echan->vchan.lock, flags);
430 632
431 break; 633 break;
432 case DMA_CC_ERROR: 634 case EDMA_DMA_CC_ERROR:
433 spin_lock_irqsave(&echan->vchan.lock, flags); 635 spin_lock_irqsave(&echan->vchan.lock, flags);
434 636
435 edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p); 637 edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);
@@ -579,7 +781,7 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
579 unsigned long flags; 781 unsigned long flags;
580 782
581 ret = dma_cookie_status(chan, cookie, txstate); 783 ret = dma_cookie_status(chan, cookie, txstate);
582 if (ret == DMA_SUCCESS || !txstate) 784 if (ret == DMA_COMPLETE || !txstate)
583 return ret; 785 return ret;
584 786
585 spin_lock_irqsave(&echan->vchan.lock, flags); 787 spin_lock_irqsave(&echan->vchan.lock, flags);
@@ -619,6 +821,7 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
619 struct device *dev) 821 struct device *dev)
620{ 822{
621 dma->device_prep_slave_sg = edma_prep_slave_sg; 823 dma->device_prep_slave_sg = edma_prep_slave_sg;
824 dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
622 dma->device_alloc_chan_resources = edma_alloc_chan_resources; 825 dma->device_alloc_chan_resources = edma_alloc_chan_resources;
623 dma->device_free_chan_resources = edma_free_chan_resources; 826 dma->device_free_chan_resources = edma_free_chan_resources;
624 dma->device_issue_pending = edma_issue_pending; 827 dma->device_issue_pending = edma_issue_pending;
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 591cd8c63abb..cb4bf682a708 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -733,28 +733,6 @@ static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
733 spin_unlock_irqrestore(&edmac->lock, flags); 733 spin_unlock_irqrestore(&edmac->lock, flags);
734} 734}
735 735
736static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
737{
738 struct device *dev = desc->txd.chan->device->dev;
739
740 if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
741 if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
742 dma_unmap_single(dev, desc->src_addr, desc->size,
743 DMA_TO_DEVICE);
744 else
745 dma_unmap_page(dev, desc->src_addr, desc->size,
746 DMA_TO_DEVICE);
747 }
748 if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
749 if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
750 dma_unmap_single(dev, desc->dst_addr, desc->size,
751 DMA_FROM_DEVICE);
752 else
753 dma_unmap_page(dev, desc->dst_addr, desc->size,
754 DMA_FROM_DEVICE);
755 }
756}
757
758static void ep93xx_dma_tasklet(unsigned long data) 736static void ep93xx_dma_tasklet(unsigned long data)
759{ 737{
760 struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; 738 struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
@@ -787,13 +765,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
787 765
788 /* Now we can release all the chained descriptors */ 766 /* Now we can release all the chained descriptors */
789 list_for_each_entry_safe(desc, d, &list, node) { 767 list_for_each_entry_safe(desc, d, &list, node) {
790 /* 768 dma_descriptor_unmap(&desc->txd);
791 * For the memcpy channels the API requires us to unmap the
792 * buffers unless requested otherwise.
793 */
794 if (!edmac->chan.private)
795 ep93xx_dma_unmap_buffers(desc);
796
797 ep93xx_dma_desc_put(edmac, desc); 769 ep93xx_dma_desc_put(edmac, desc);
798 } 770 }
799 771
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 61517dd0d0b7..7086a16a55f2 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -870,22 +870,7 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
870 /* Run any dependencies */ 870 /* Run any dependencies */
871 dma_run_dependencies(txd); 871 dma_run_dependencies(txd);
872 872
873 /* Unmap the dst buffer, if requested */ 873 dma_descriptor_unmap(txd);
874 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
875 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
876 dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
877 else
878 dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
879 }
880
881 /* Unmap the src buffer, if requested */
882 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
883 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
884 dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
885 else
886 dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
887 }
888
889#ifdef FSL_DMA_LD_DEBUG 874#ifdef FSL_DMA_LD_DEBUG
890 chan_dbg(chan, "LD %p free\n", desc); 875 chan_dbg(chan, "LD %p free\n", desc);
891#endif 876#endif
@@ -1255,7 +1240,9 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
1255 WARN_ON(fdev->feature != chan->feature); 1240 WARN_ON(fdev->feature != chan->feature);
1256 1241
1257 chan->dev = fdev->dev; 1242 chan->dev = fdev->dev;
1258 chan->id = ((res.start - 0x100) & 0xfff) >> 7; 1243 chan->id = (res.start & 0xfff) < 0x300 ?
1244 ((res.start - 0x100) & 0xfff) >> 7 :
1245 ((res.start - 0x200) & 0xfff) >> 7;
1259 if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { 1246 if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
1260 dev_err(fdev->dev, "too many channels for device\n"); 1247 dev_err(fdev->dev, "too many channels for device\n");
1261 err = -EINVAL; 1248 err = -EINVAL;
@@ -1428,6 +1415,7 @@ static int fsldma_of_remove(struct platform_device *op)
1428} 1415}
1429 1416
1430static const struct of_device_id fsldma_of_ids[] = { 1417static const struct of_device_id fsldma_of_ids[] = {
1418 { .compatible = "fsl,elo3-dma", },
1431 { .compatible = "fsl,eloplus-dma", }, 1419 { .compatible = "fsl,eloplus-dma", },
1432 { .compatible = "fsl,elo-dma", }, 1420 { .compatible = "fsl,elo-dma", },
1433 {} 1421 {}
@@ -1449,7 +1437,7 @@ static struct platform_driver fsldma_of_driver = {
1449 1437
1450static __init int fsldma_init(void) 1438static __init int fsldma_init(void)
1451{ 1439{
1452 pr_info("Freescale Elo / Elo Plus DMA driver\n"); 1440 pr_info("Freescale Elo series DMA driver\n");
1453 return platform_driver_register(&fsldma_of_driver); 1441 return platform_driver_register(&fsldma_of_driver);
1454} 1442}
1455 1443
@@ -1461,5 +1449,5 @@ static void __exit fsldma_exit(void)
1461subsys_initcall(fsldma_init); 1449subsys_initcall(fsldma_init);
1462module_exit(fsldma_exit); 1450module_exit(fsldma_exit);
1463 1451
1464MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver"); 1452MODULE_DESCRIPTION("Freescale Elo series DMA driver");
1465MODULE_LICENSE("GPL"); 1453MODULE_LICENSE("GPL");
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index f5c38791fc74..1ffc24484d23 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -112,7 +112,7 @@ struct fsldma_chan_regs {
112}; 112};
113 113
114struct fsldma_chan; 114struct fsldma_chan;
115#define FSL_DMA_MAX_CHANS_PER_DEVICE 4 115#define FSL_DMA_MAX_CHANS_PER_DEVICE 8
116 116
117struct fsldma_device { 117struct fsldma_device {
118 void __iomem *regs; /* DGSR register base */ 118 void __iomem *regs; /* DGSR register base */
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 55852c026791..6f9ac2022abd 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -572,9 +572,11 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
572 572
573 imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel)); 573 imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));
574 574
575 dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x " 575 dev_dbg(imxdma->dev,
576 "dma_length=%d\n", __func__, imxdmac->channel, 576 "%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
577 d->dest, d->src, d->len); 577 __func__, imxdmac->channel,
578 (unsigned long long)d->dest,
579 (unsigned long long)d->src, d->len);
578 580
579 break; 581 break;
580 /* Cyclic transfer is the same as slave_sg with special sg configuration. */ 582 /* Cyclic transfer is the same as slave_sg with special sg configuration. */
@@ -586,20 +588,22 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
586 imx_dmav1_writel(imxdma, imxdmac->ccr_from_device, 588 imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
587 DMA_CCR(imxdmac->channel)); 589 DMA_CCR(imxdmac->channel));
588 590
589 dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " 591 dev_dbg(imxdma->dev,
590 "total length=%d dev_addr=0x%08x (dev2mem)\n", 592 "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
591 __func__, imxdmac->channel, d->sg, d->sgcount, 593 __func__, imxdmac->channel,
592 d->len, imxdmac->per_address); 594 d->sg, d->sgcount, d->len,
595 (unsigned long long)imxdmac->per_address);
593 } else if (d->direction == DMA_MEM_TO_DEV) { 596 } else if (d->direction == DMA_MEM_TO_DEV) {
594 imx_dmav1_writel(imxdma, imxdmac->per_address, 597 imx_dmav1_writel(imxdma, imxdmac->per_address,
595 DMA_DAR(imxdmac->channel)); 598 DMA_DAR(imxdmac->channel));
596 imx_dmav1_writel(imxdma, imxdmac->ccr_to_device, 599 imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
597 DMA_CCR(imxdmac->channel)); 600 DMA_CCR(imxdmac->channel));
598 601
599 dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " 602 dev_dbg(imxdma->dev,
600 "total length=%d dev_addr=0x%08x (mem2dev)\n", 603 "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
601 __func__, imxdmac->channel, d->sg, d->sgcount, 604 __func__, imxdmac->channel,
602 d->len, imxdmac->per_address); 605 d->sg, d->sgcount, d->len,
606 (unsigned long long)imxdmac->per_address);
603 } else { 607 } else {
604 dev_err(imxdma->dev, "%s channel: %d bad dma mode\n", 608 dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
605 __func__, imxdmac->channel); 609 __func__, imxdmac->channel);
@@ -771,7 +775,7 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan)
771 desc->desc.tx_submit = imxdma_tx_submit; 775 desc->desc.tx_submit = imxdma_tx_submit;
772 /* txd.flags will be overwritten in prep funcs */ 776 /* txd.flags will be overwritten in prep funcs */
773 desc->desc.flags = DMA_CTRL_ACK; 777 desc->desc.flags = DMA_CTRL_ACK;
774 desc->status = DMA_SUCCESS; 778 desc->status = DMA_COMPLETE;
775 779
776 list_add_tail(&desc->node, &imxdmac->ld_free); 780 list_add_tail(&desc->node, &imxdmac->ld_free);
777 imxdmac->descs_allocated++; 781 imxdmac->descs_allocated++;
@@ -870,7 +874,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
870 int i; 874 int i;
871 unsigned int periods = buf_len / period_len; 875 unsigned int periods = buf_len / period_len;
872 876
873 dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n", 877 dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
874 __func__, imxdmac->channel, buf_len, period_len); 878 __func__, imxdmac->channel, buf_len, period_len);
875 879
876 if (list_empty(&imxdmac->ld_free) || 880 if (list_empty(&imxdmac->ld_free) ||
@@ -926,8 +930,9 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
926 struct imxdma_engine *imxdma = imxdmac->imxdma; 930 struct imxdma_engine *imxdma = imxdmac->imxdma;
927 struct imxdma_desc *desc; 931 struct imxdma_desc *desc;
928 932
929 dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n", 933 dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
930 __func__, imxdmac->channel, src, dest, len); 934 __func__, imxdmac->channel, (unsigned long long)src,
935 (unsigned long long)dest, len);
931 936
932 if (list_empty(&imxdmac->ld_free) || 937 if (list_empty(&imxdmac->ld_free) ||
933 imxdma_chan_is_doing_cyclic(imxdmac)) 938 imxdma_chan_is_doing_cyclic(imxdmac))
@@ -956,9 +961,10 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
956 struct imxdma_engine *imxdma = imxdmac->imxdma; 961 struct imxdma_engine *imxdma = imxdmac->imxdma;
957 struct imxdma_desc *desc; 962 struct imxdma_desc *desc;
958 963
959 dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n" 964 dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
960 " src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__, 965 " src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
961 imxdmac->channel, xt->src_start, xt->dst_start, 966 imxdmac->channel, (unsigned long long)xt->src_start,
967 (unsigned long long) xt->dst_start,
962 xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false", 968 xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
963 xt->numf, xt->frame_size); 969 xt->numf, xt->frame_size);
964 970
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index c1fd504cae28..c75679d42028 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -638,7 +638,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
638 if (error) 638 if (error)
639 sdmac->status = DMA_ERROR; 639 sdmac->status = DMA_ERROR;
640 else 640 else
641 sdmac->status = DMA_SUCCESS; 641 sdmac->status = DMA_COMPLETE;
642 642
643 dma_cookie_complete(&sdmac->desc); 643 dma_cookie_complete(&sdmac->desc);
644 if (sdmac->desc.callback) 644 if (sdmac->desc.callback)
@@ -1089,8 +1089,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1089 param &= ~BD_CONT; 1089 param &= ~BD_CONT;
1090 } 1090 }
1091 1091
1092 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", 1092 dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
1093 i, count, sg->dma_address, 1093 i, count, (u64)sg->dma_address,
1094 param & BD_WRAP ? "wrap" : "", 1094 param & BD_WRAP ? "wrap" : "",
1095 param & BD_INTR ? " intr" : ""); 1095 param & BD_INTR ? " intr" : "");
1096 1096
@@ -1163,8 +1163,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1163 if (i + 1 == num_periods) 1163 if (i + 1 == num_periods)
1164 param |= BD_WRAP; 1164 param |= BD_WRAP;
1165 1165
1166 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", 1166 dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
1167 i, period_len, dma_addr, 1167 i, period_len, (u64)dma_addr,
1168 param & BD_WRAP ? "wrap" : "", 1168 param & BD_WRAP ? "wrap" : "",
1169 param & BD_INTR ? " intr" : ""); 1169 param & BD_INTR ? " intr" : "");
1170 1170
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index a975ebebea8a..1aab8130efa1 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -309,7 +309,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
309 callback_txd(param_txd); 309 callback_txd(param_txd);
310 } 310 }
311 if (midc->raw_tfr) { 311 if (midc->raw_tfr) {
312 desc->status = DMA_SUCCESS; 312 desc->status = DMA_COMPLETE;
313 if (desc->lli != NULL) { 313 if (desc->lli != NULL) {
314 pci_pool_free(desc->lli_pool, desc->lli, 314 pci_pool_free(desc->lli_pool, desc->lli,
315 desc->lli_phys); 315 desc->lli_phys);
@@ -481,7 +481,7 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
481 enum dma_status ret; 481 enum dma_status ret;
482 482
483 ret = dma_cookie_status(chan, cookie, txstate); 483 ret = dma_cookie_status(chan, cookie, txstate);
484 if (ret != DMA_SUCCESS) { 484 if (ret != DMA_COMPLETE) {
485 spin_lock_bh(&midc->lock); 485 spin_lock_bh(&midc->lock);
486 midc_scan_descriptors(to_middma_device(chan->device), midc); 486 midc_scan_descriptors(to_middma_device(chan->device), midc);
487 spin_unlock_bh(&midc->lock); 487 spin_unlock_bh(&midc->lock);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 5ff6fc1819dc..1a49c777607c 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -531,21 +531,6 @@ static void ioat1_cleanup_event(unsigned long data)
531 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); 531 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
532} 532}
533 533
534void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
535 size_t len, struct ioat_dma_descriptor *hw)
536{
537 struct pci_dev *pdev = chan->device->pdev;
538 size_t offset = len - hw->size;
539
540 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
541 ioat_unmap(pdev, hw->dst_addr - offset, len,
542 PCI_DMA_FROMDEVICE, flags, 1);
543
544 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
545 ioat_unmap(pdev, hw->src_addr - offset, len,
546 PCI_DMA_TODEVICE, flags, 0);
547}
548
549dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan) 534dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
550{ 535{
551 dma_addr_t phys_complete; 536 dma_addr_t phys_complete;
@@ -602,7 +587,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
602 dump_desc_dbg(ioat, desc); 587 dump_desc_dbg(ioat, desc);
603 if (tx->cookie) { 588 if (tx->cookie) {
604 dma_cookie_complete(tx); 589 dma_cookie_complete(tx);
605 ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); 590 dma_descriptor_unmap(tx);
606 ioat->active -= desc->hw->tx_cnt; 591 ioat->active -= desc->hw->tx_cnt;
607 if (tx->callback) { 592 if (tx->callback) {
608 tx->callback(tx->callback_param); 593 tx->callback(tx->callback_param);
@@ -733,7 +718,7 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
733 enum dma_status ret; 718 enum dma_status ret;
734 719
735 ret = dma_cookie_status(c, cookie, txstate); 720 ret = dma_cookie_status(c, cookie, txstate);
736 if (ret == DMA_SUCCESS) 721 if (ret == DMA_COMPLETE)
737 return ret; 722 return ret;
738 723
739 device->cleanup_fn((unsigned long) c); 724 device->cleanup_fn((unsigned long) c);
@@ -833,8 +818,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
833 818
834 dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); 819 dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
835 dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); 820 dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
836 flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP | 821 flags = DMA_PREP_INTERRUPT;
837 DMA_PREP_INTERRUPT;
838 tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, 822 tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
839 IOAT_TEST_SIZE, flags); 823 IOAT_TEST_SIZE, flags);
840 if (!tx) { 824 if (!tx) {
@@ -859,7 +843,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
859 843
860 if (tmo == 0 || 844 if (tmo == 0 ||
861 dma->device_tx_status(dma_chan, cookie, NULL) 845 dma->device_tx_status(dma_chan, cookie, NULL)
862 != DMA_SUCCESS) { 846 != DMA_COMPLETE) {
863 dev_err(dev, "Self-test copy timed out, disabling\n"); 847 dev_err(dev, "Self-test copy timed out, disabling\n");
864 err = -ENODEV; 848 err = -ENODEV;
865 goto unmap_dma; 849 goto unmap_dma;
@@ -885,8 +869,7 @@ static char ioat_interrupt_style[32] = "msix";
885module_param_string(ioat_interrupt_style, ioat_interrupt_style, 869module_param_string(ioat_interrupt_style, ioat_interrupt_style,
886 sizeof(ioat_interrupt_style), 0644); 870 sizeof(ioat_interrupt_style), 0644);
887MODULE_PARM_DESC(ioat_interrupt_style, 871MODULE_PARM_DESC(ioat_interrupt_style,
888 "set ioat interrupt style: msix (default), " 872 "set ioat interrupt style: msix (default), msi, intx");
889 "msix-single-vector, msi, intx)");
890 873
891/** 874/**
892 * ioat_dma_setup_interrupts - setup interrupt handler 875 * ioat_dma_setup_interrupts - setup interrupt handler
@@ -904,8 +887,6 @@ int ioat_dma_setup_interrupts(struct ioatdma_device *device)
904 887
905 if (!strcmp(ioat_interrupt_style, "msix")) 888 if (!strcmp(ioat_interrupt_style, "msix"))
906 goto msix; 889 goto msix;
907 if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
908 goto msix_single_vector;
909 if (!strcmp(ioat_interrupt_style, "msi")) 890 if (!strcmp(ioat_interrupt_style, "msi"))
910 goto msi; 891 goto msi;
911 if (!strcmp(ioat_interrupt_style, "intx")) 892 if (!strcmp(ioat_interrupt_style, "intx"))
@@ -920,10 +901,8 @@ msix:
920 device->msix_entries[i].entry = i; 901 device->msix_entries[i].entry = i;
921 902
922 err = pci_enable_msix(pdev, device->msix_entries, msixcnt); 903 err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
923 if (err < 0) 904 if (err)
924 goto msi; 905 goto msi;
925 if (err > 0)
926 goto msix_single_vector;
927 906
928 for (i = 0; i < msixcnt; i++) { 907 for (i = 0; i < msixcnt; i++) {
929 msix = &device->msix_entries[i]; 908 msix = &device->msix_entries[i];
@@ -937,29 +916,13 @@ msix:
937 chan = ioat_chan_by_index(device, j); 916 chan = ioat_chan_by_index(device, j);
938 devm_free_irq(dev, msix->vector, chan); 917 devm_free_irq(dev, msix->vector, chan);
939 } 918 }
940 goto msix_single_vector; 919 goto msi;
941 } 920 }
942 } 921 }
943 intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; 922 intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
944 device->irq_mode = IOAT_MSIX; 923 device->irq_mode = IOAT_MSIX;
945 goto done; 924 goto done;
946 925
947msix_single_vector:
948 msix = &device->msix_entries[0];
949 msix->entry = 0;
950 err = pci_enable_msix(pdev, device->msix_entries, 1);
951 if (err)
952 goto msi;
953
954 err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
955 "ioat-msix", device);
956 if (err) {
957 pci_disable_msix(pdev);
958 goto msi;
959 }
960 device->irq_mode = IOAT_MSIX_SINGLE;
961 goto done;
962
963msi: 926msi:
964 err = pci_enable_msi(pdev); 927 err = pci_enable_msi(pdev);
965 if (err) 928 if (err)
@@ -971,7 +934,7 @@ msi:
971 pci_disable_msi(pdev); 934 pci_disable_msi(pdev);
972 goto intx; 935 goto intx;
973 } 936 }
974 device->irq_mode = IOAT_MSIX; 937 device->irq_mode = IOAT_MSI;
975 goto done; 938 goto done;
976 939
977intx: 940intx:
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 54fb7b9ff9aa..11fb877ddca9 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -52,7 +52,6 @@
52enum ioat_irq_mode { 52enum ioat_irq_mode {
53 IOAT_NOIRQ = 0, 53 IOAT_NOIRQ = 0,
54 IOAT_MSIX, 54 IOAT_MSIX,
55 IOAT_MSIX_SINGLE,
56 IOAT_MSI, 55 IOAT_MSI,
57 IOAT_INTX 56 IOAT_INTX
58}; 57};
@@ -83,7 +82,6 @@ struct ioatdma_device {
83 struct pci_pool *completion_pool; 82 struct pci_pool *completion_pool;
84#define MAX_SED_POOLS 5 83#define MAX_SED_POOLS 5
85 struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; 84 struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
86 struct kmem_cache *sed_pool;
87 struct dma_device common; 85 struct dma_device common;
88 u8 version; 86 u8 version;
89 struct msix_entry msix_entries[4]; 87 struct msix_entry msix_entries[4];
@@ -342,16 +340,6 @@ static inline bool is_ioat_bug(unsigned long err)
342 return !!err; 340 return !!err;
343} 341}
344 342
345static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
346 int direction, enum dma_ctrl_flags flags, bool dst)
347{
348 if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
349 (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
350 pci_unmap_single(pdev, addr, len, direction);
351 else
352 pci_unmap_page(pdev, addr, len, direction);
353}
354
355int ioat_probe(struct ioatdma_device *device); 343int ioat_probe(struct ioatdma_device *device);
356int ioat_register(struct ioatdma_device *device); 344int ioat_register(struct ioatdma_device *device);
357int ioat1_dma_probe(struct ioatdma_device *dev, int dca); 345int ioat1_dma_probe(struct ioatdma_device *dev, int dca);
@@ -363,8 +351,6 @@ void ioat_init_channel(struct ioatdma_device *device,
363 struct ioat_chan_common *chan, int idx); 351 struct ioat_chan_common *chan, int idx);
364enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, 352enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
365 struct dma_tx_state *txstate); 353 struct dma_tx_state *txstate);
366void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
367 size_t len, struct ioat_dma_descriptor *hw);
368bool ioat_cleanup_preamble(struct ioat_chan_common *chan, 354bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
369 dma_addr_t *phys_complete); 355 dma_addr_t *phys_complete);
370void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); 356void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index b925e1b1d139..5d3affe7e976 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -148,7 +148,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
148 tx = &desc->txd; 148 tx = &desc->txd;
149 dump_desc_dbg(ioat, desc); 149 dump_desc_dbg(ioat, desc);
150 if (tx->cookie) { 150 if (tx->cookie) {
151 ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); 151 dma_descriptor_unmap(tx);
152 dma_cookie_complete(tx); 152 dma_cookie_complete(tx);
153 if (tx->callback) { 153 if (tx->callback) {
154 tx->callback(tx->callback_param); 154 tx->callback(tx->callback_param);
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 212d584fe427..470292767e68 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -157,7 +157,6 @@ static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
157 157
158int ioat2_dma_probe(struct ioatdma_device *dev, int dca); 158int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
159int ioat3_dma_probe(struct ioatdma_device *dev, int dca); 159int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
160void ioat3_dma_remove(struct ioatdma_device *dev);
161struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); 160struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
162struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); 161struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
163int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs); 162int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index d8ececaf1b57..820817e97e62 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -67,6 +67,8 @@
67#include "dma.h" 67#include "dma.h"
68#include "dma_v2.h" 68#include "dma_v2.h"
69 69
70extern struct kmem_cache *ioat3_sed_cache;
71
70/* ioat hardware assumes at least two sources for raid operations */ 72/* ioat hardware assumes at least two sources for raid operations */
71#define src_cnt_to_sw(x) ((x) + 2) 73#define src_cnt_to_sw(x) ((x) + 2)
72#define src_cnt_to_hw(x) ((x) - 2) 74#define src_cnt_to_hw(x) ((x) - 2)
@@ -87,22 +89,8 @@ static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
87static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7, 89static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
88 0, 1, 2, 3, 4, 5, 6 }; 90 0, 1, 2, 3, 4, 5, 6 };
89 91
90/*
91 * technically sources 1 and 2 do not require SED, but the op will have
92 * at least 9 descriptors so that's irrelevant.
93 */
94static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
95 1, 1, 1, 1, 1, 1, 1 };
96
97static void ioat3_eh(struct ioat2_dma_chan *ioat); 92static void ioat3_eh(struct ioat2_dma_chan *ioat);
98 93
99static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
100{
101 struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
102
103 return raw->field[xor_idx_to_field[idx]];
104}
105
106static void xor_set_src(struct ioat_raw_descriptor *descs[2], 94static void xor_set_src(struct ioat_raw_descriptor *descs[2],
107 dma_addr_t addr, u32 offset, int idx) 95 dma_addr_t addr, u32 offset, int idx)
108{ 96{
@@ -135,12 +123,6 @@ static void pq_set_src(struct ioat_raw_descriptor *descs[2],
135 pq->coef[idx] = coef; 123 pq->coef[idx] = coef;
136} 124}
137 125
138static int sed_get_pq16_pool_idx(int src_cnt)
139{
140
141 return pq16_idx_to_sed[src_cnt];
142}
143
144static bool is_jf_ioat(struct pci_dev *pdev) 126static bool is_jf_ioat(struct pci_dev *pdev)
145{ 127{
146 switch (pdev->device) { 128 switch (pdev->device) {
@@ -272,7 +254,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
272 struct ioat_sed_ent *sed; 254 struct ioat_sed_ent *sed;
273 gfp_t flags = __GFP_ZERO | GFP_ATOMIC; 255 gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
274 256
275 sed = kmem_cache_alloc(device->sed_pool, flags); 257 sed = kmem_cache_alloc(ioat3_sed_cache, flags);
276 if (!sed) 258 if (!sed)
277 return NULL; 259 return NULL;
278 260
@@ -280,7 +262,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
280 sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool], 262 sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
281 flags, &sed->dma); 263 flags, &sed->dma);
282 if (!sed->hw) { 264 if (!sed->hw) {
283 kmem_cache_free(device->sed_pool, sed); 265 kmem_cache_free(ioat3_sed_cache, sed);
284 return NULL; 266 return NULL;
285 } 267 }
286 268
@@ -293,165 +275,7 @@ static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *s
293 return; 275 return;
294 276
295 dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); 277 dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
296 kmem_cache_free(device->sed_pool, sed); 278 kmem_cache_free(ioat3_sed_cache, sed);
297}
298
299static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
300 struct ioat_ring_ent *desc, int idx)
301{
302 struct ioat_chan_common *chan = &ioat->base;
303 struct pci_dev *pdev = chan->device->pdev;
304 size_t len = desc->len;
305 size_t offset = len - desc->hw->size;
306 struct dma_async_tx_descriptor *tx = &desc->txd;
307 enum dma_ctrl_flags flags = tx->flags;
308
309 switch (desc->hw->ctl_f.op) {
310 case IOAT_OP_COPY:
311 if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
312 ioat_dma_unmap(chan, flags, len, desc->hw);
313 break;
314 case IOAT_OP_XOR_VAL:
315 case IOAT_OP_XOR: {
316 struct ioat_xor_descriptor *xor = desc->xor;
317 struct ioat_ring_ent *ext;
318 struct ioat_xor_ext_descriptor *xor_ex = NULL;
319 int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
320 struct ioat_raw_descriptor *descs[2];
321 int i;
322
323 if (src_cnt > 5) {
324 ext = ioat2_get_ring_ent(ioat, idx + 1);
325 xor_ex = ext->xor_ex;
326 }
327
328 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
329 descs[0] = (struct ioat_raw_descriptor *) xor;
330 descs[1] = (struct ioat_raw_descriptor *) xor_ex;
331 for (i = 0; i < src_cnt; i++) {
332 dma_addr_t src = xor_get_src(descs, i);
333
334 ioat_unmap(pdev, src - offset, len,
335 PCI_DMA_TODEVICE, flags, 0);
336 }
337
338 /* dest is a source in xor validate operations */
339 if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
340 ioat_unmap(pdev, xor->dst_addr - offset, len,
341 PCI_DMA_TODEVICE, flags, 1);
342 break;
343 }
344 }
345
346 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
347 ioat_unmap(pdev, xor->dst_addr - offset, len,
348 PCI_DMA_FROMDEVICE, flags, 1);
349 break;
350 }
351 case IOAT_OP_PQ_VAL:
352 case IOAT_OP_PQ: {
353 struct ioat_pq_descriptor *pq = desc->pq;
354 struct ioat_ring_ent *ext;
355 struct ioat_pq_ext_descriptor *pq_ex = NULL;
356 int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
357 struct ioat_raw_descriptor *descs[2];
358 int i;
359
360 if (src_cnt > 3) {
361 ext = ioat2_get_ring_ent(ioat, idx + 1);
362 pq_ex = ext->pq_ex;
363 }
364
365 /* in the 'continue' case don't unmap the dests as sources */
366 if (dmaf_p_disabled_continue(flags))
367 src_cnt--;
368 else if (dmaf_continue(flags))
369 src_cnt -= 3;
370
371 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
372 descs[0] = (struct ioat_raw_descriptor *) pq;
373 descs[1] = (struct ioat_raw_descriptor *) pq_ex;
374 for (i = 0; i < src_cnt; i++) {
375 dma_addr_t src = pq_get_src(descs, i);
376
377 ioat_unmap(pdev, src - offset, len,
378 PCI_DMA_TODEVICE, flags, 0);
379 }
380
381 /* the dests are sources in pq validate operations */
382 if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
383 if (!(flags & DMA_PREP_PQ_DISABLE_P))
384 ioat_unmap(pdev, pq->p_addr - offset,
385 len, PCI_DMA_TODEVICE, flags, 0);
386 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
387 ioat_unmap(pdev, pq->q_addr - offset,
388 len, PCI_DMA_TODEVICE, flags, 0);
389 break;
390 }
391 }
392
393 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
394 if (!(flags & DMA_PREP_PQ_DISABLE_P))
395 ioat_unmap(pdev, pq->p_addr - offset, len,
396 PCI_DMA_BIDIRECTIONAL, flags, 1);
397 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
398 ioat_unmap(pdev, pq->q_addr - offset, len,
399 PCI_DMA_BIDIRECTIONAL, flags, 1);
400 }
401 break;
402 }
403 case IOAT_OP_PQ_16S:
404 case IOAT_OP_PQ_VAL_16S: {
405 struct ioat_pq_descriptor *pq = desc->pq;
406 int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
407 struct ioat_raw_descriptor *descs[4];
408 int i;
409
410 /* in the 'continue' case don't unmap the dests as sources */
411 if (dmaf_p_disabled_continue(flags))
412 src_cnt--;
413 else if (dmaf_continue(flags))
414 src_cnt -= 3;
415
416 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
417 descs[0] = (struct ioat_raw_descriptor *)pq;
418 descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw);
419 descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]);
420 for (i = 0; i < src_cnt; i++) {
421 dma_addr_t src = pq16_get_src(descs, i);
422
423 ioat_unmap(pdev, src - offset, len,
424 PCI_DMA_TODEVICE, flags, 0);
425 }
426
427 /* the dests are sources in pq validate operations */
428 if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
429 if (!(flags & DMA_PREP_PQ_DISABLE_P))
430 ioat_unmap(pdev, pq->p_addr - offset,
431 len, PCI_DMA_TODEVICE,
432 flags, 0);
433 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
434 ioat_unmap(pdev, pq->q_addr - offset,
435 len, PCI_DMA_TODEVICE,
436 flags, 0);
437 break;
438 }
439 }
440
441 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
442 if (!(flags & DMA_PREP_PQ_DISABLE_P))
443 ioat_unmap(pdev, pq->p_addr - offset, len,
444 PCI_DMA_BIDIRECTIONAL, flags, 1);
445 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
446 ioat_unmap(pdev, pq->q_addr - offset, len,
447 PCI_DMA_BIDIRECTIONAL, flags, 1);
448 }
449 break;
450 }
451 default:
452 dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
453 __func__, desc->hw->ctl_f.op);
454 }
455} 279}
456 280
457static bool desc_has_ext(struct ioat_ring_ent *desc) 281static bool desc_has_ext(struct ioat_ring_ent *desc)
@@ -577,7 +401,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
577 tx = &desc->txd; 401 tx = &desc->txd;
578 if (tx->cookie) { 402 if (tx->cookie) {
579 dma_cookie_complete(tx); 403 dma_cookie_complete(tx);
580 ioat3_dma_unmap(ioat, desc, idx + i); 404 dma_descriptor_unmap(tx);
581 if (tx->callback) { 405 if (tx->callback) {
582 tx->callback(tx->callback_param); 406 tx->callback(tx->callback_param);
583 tx->callback = NULL; 407 tx->callback = NULL;
@@ -807,7 +631,7 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
807 enum dma_status ret; 631 enum dma_status ret;
808 632
809 ret = dma_cookie_status(c, cookie, txstate); 633 ret = dma_cookie_status(c, cookie, txstate);
810 if (ret == DMA_SUCCESS) 634 if (ret == DMA_COMPLETE)
811 return ret; 635 return ret;
812 636
813 ioat3_cleanup(ioat); 637 ioat3_cleanup(ioat);
@@ -1129,9 +953,6 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
1129 u8 op; 953 u8 op;
1130 int i, s, idx, num_descs; 954 int i, s, idx, num_descs;
1131 955
1132 /* this function only handles src_cnt 9 - 16 */
1133 BUG_ON(src_cnt < 9);
1134
1135 /* this function is only called with 9-16 sources */ 956 /* this function is only called with 9-16 sources */
1136 op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; 957 op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
1137 958
@@ -1159,8 +980,7 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
1159 980
1160 descs[0] = (struct ioat_raw_descriptor *) pq; 981 descs[0] = (struct ioat_raw_descriptor *) pq;
1161 982
1162 desc->sed = ioat3_alloc_sed(device, 983 desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3);
1163 sed_get_pq16_pool_idx(src_cnt));
1164 if (!desc->sed) { 984 if (!desc->sed) {
1165 dev_err(to_dev(chan), 985 dev_err(to_dev(chan),
1166 "%s: no free sed entries\n", __func__); 986 "%s: no free sed entries\n", __func__);
@@ -1218,13 +1038,21 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
1218 return &desc->txd; 1038 return &desc->txd;
1219} 1039}
1220 1040
1041static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
1042{
1043 if (dmaf_p_disabled_continue(flags))
1044 return src_cnt + 1;
1045 else if (dmaf_continue(flags))
1046 return src_cnt + 3;
1047 else
1048 return src_cnt;
1049}
1050
1221static struct dma_async_tx_descriptor * 1051static struct dma_async_tx_descriptor *
1222ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, 1052ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
1223 unsigned int src_cnt, const unsigned char *scf, size_t len, 1053 unsigned int src_cnt, const unsigned char *scf, size_t len,
1224 unsigned long flags) 1054 unsigned long flags)
1225{ 1055{
1226 struct dma_device *dma = chan->device;
1227
1228 /* specify valid address for disabled result */ 1056 /* specify valid address for disabled result */
1229 if (flags & DMA_PREP_PQ_DISABLE_P) 1057 if (flags & DMA_PREP_PQ_DISABLE_P)
1230 dst[0] = dst[1]; 1058 dst[0] = dst[1];
@@ -1244,7 +1072,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
1244 single_source_coef[0] = scf[0]; 1072 single_source_coef[0] = scf[0];
1245 single_source_coef[1] = 0; 1073 single_source_coef[1] = 0;
1246 1074
1247 return (src_cnt > 8) && (dma->max_pq > 8) ? 1075 return src_cnt_flags(src_cnt, flags) > 8 ?
1248 __ioat3_prep_pq16_lock(chan, NULL, dst, single_source, 1076 __ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
1249 2, single_source_coef, len, 1077 2, single_source_coef, len,
1250 flags) : 1078 flags) :
@@ -1252,7 +1080,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
1252 single_source_coef, len, flags); 1080 single_source_coef, len, flags);
1253 1081
1254 } else { 1082 } else {
1255 return (src_cnt > 8) && (dma->max_pq > 8) ? 1083 return src_cnt_flags(src_cnt, flags) > 8 ?
1256 __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt, 1084 __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
1257 scf, len, flags) : 1085 scf, len, flags) :
1258 __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, 1086 __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
@@ -1265,8 +1093,6 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
1265 unsigned int src_cnt, const unsigned char *scf, size_t len, 1093 unsigned int src_cnt, const unsigned char *scf, size_t len,
1266 enum sum_check_flags *pqres, unsigned long flags) 1094 enum sum_check_flags *pqres, unsigned long flags)
1267{ 1095{
1268 struct dma_device *dma = chan->device;
1269
1270 /* specify valid address for disabled result */ 1096 /* specify valid address for disabled result */
1271 if (flags & DMA_PREP_PQ_DISABLE_P) 1097 if (flags & DMA_PREP_PQ_DISABLE_P)
1272 pq[0] = pq[1]; 1098 pq[0] = pq[1];
@@ -1278,7 +1104,7 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
1278 */ 1104 */
1279 *pqres = 0; 1105 *pqres = 0;
1280 1106
1281 return (src_cnt > 8) && (dma->max_pq > 8) ? 1107 return src_cnt_flags(src_cnt, flags) > 8 ?
1282 __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, 1108 __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
1283 flags) : 1109 flags) :
1284 __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, 1110 __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
@@ -1289,7 +1115,6 @@ static struct dma_async_tx_descriptor *
1289ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, 1115ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
1290 unsigned int src_cnt, size_t len, unsigned long flags) 1116 unsigned int src_cnt, size_t len, unsigned long flags)
1291{ 1117{
1292 struct dma_device *dma = chan->device;
1293 unsigned char scf[src_cnt]; 1118 unsigned char scf[src_cnt];
1294 dma_addr_t pq[2]; 1119 dma_addr_t pq[2];
1295 1120
@@ -1298,7 +1123,7 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
1298 flags |= DMA_PREP_PQ_DISABLE_Q; 1123 flags |= DMA_PREP_PQ_DISABLE_Q;
1299 pq[1] = dst; /* specify valid address for disabled result */ 1124 pq[1] = dst; /* specify valid address for disabled result */
1300 1125
1301 return (src_cnt > 8) && (dma->max_pq > 8) ? 1126 return src_cnt_flags(src_cnt, flags) > 8 ?
1302 __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, 1127 __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
1303 flags) : 1128 flags) :
1304 __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, 1129 __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
@@ -1310,7 +1135,6 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
1310 unsigned int src_cnt, size_t len, 1135 unsigned int src_cnt, size_t len,
1311 enum sum_check_flags *result, unsigned long flags) 1136 enum sum_check_flags *result, unsigned long flags)
1312{ 1137{
1313 struct dma_device *dma = chan->device;
1314 unsigned char scf[src_cnt]; 1138 unsigned char scf[src_cnt];
1315 dma_addr_t pq[2]; 1139 dma_addr_t pq[2];
1316 1140
@@ -1324,8 +1148,7 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
1324 flags |= DMA_PREP_PQ_DISABLE_Q; 1148 flags |= DMA_PREP_PQ_DISABLE_Q;
1325 pq[1] = pq[0]; /* specify valid address for disabled result */ 1149 pq[1] = pq[0]; /* specify valid address for disabled result */
1326 1150
1327 1151 return src_cnt_flags(src_cnt, flags) > 8 ?
1328 return (src_cnt > 8) && (dma->max_pq > 8) ?
1329 __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, 1152 __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
1330 scf, len, flags) : 1153 scf, len, flags) :
1331 __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, 1154 __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
@@ -1444,9 +1267,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
1444 DMA_TO_DEVICE); 1267 DMA_TO_DEVICE);
1445 tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, 1268 tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
1446 IOAT_NUM_SRC_TEST, PAGE_SIZE, 1269 IOAT_NUM_SRC_TEST, PAGE_SIZE,
1447 DMA_PREP_INTERRUPT | 1270 DMA_PREP_INTERRUPT);
1448 DMA_COMPL_SKIP_SRC_UNMAP |
1449 DMA_COMPL_SKIP_DEST_UNMAP);
1450 1271
1451 if (!tx) { 1272 if (!tx) {
1452 dev_err(dev, "Self-test xor prep failed\n"); 1273 dev_err(dev, "Self-test xor prep failed\n");
@@ -1468,7 +1289,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
1468 1289
1469 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 1290 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1470 1291
1471 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { 1292 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1472 dev_err(dev, "Self-test xor timed out\n"); 1293 dev_err(dev, "Self-test xor timed out\n");
1473 err = -ENODEV; 1294 err = -ENODEV;
1474 goto dma_unmap; 1295 goto dma_unmap;
@@ -1507,9 +1328,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
1507 DMA_TO_DEVICE); 1328 DMA_TO_DEVICE);
1508 tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, 1329 tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
1509 IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, 1330 IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
1510 &xor_val_result, DMA_PREP_INTERRUPT | 1331 &xor_val_result, DMA_PREP_INTERRUPT);
1511 DMA_COMPL_SKIP_SRC_UNMAP |
1512 DMA_COMPL_SKIP_DEST_UNMAP);
1513 if (!tx) { 1332 if (!tx) {
1514 dev_err(dev, "Self-test zero prep failed\n"); 1333 dev_err(dev, "Self-test zero prep failed\n");
1515 err = -ENODEV; 1334 err = -ENODEV;
@@ -1530,7 +1349,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
1530 1349
1531 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 1350 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1532 1351
1533 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { 1352 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1534 dev_err(dev, "Self-test validate timed out\n"); 1353 dev_err(dev, "Self-test validate timed out\n");
1535 err = -ENODEV; 1354 err = -ENODEV;
1536 goto dma_unmap; 1355 goto dma_unmap;
@@ -1545,6 +1364,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
1545 goto free_resources; 1364 goto free_resources;
1546 } 1365 }
1547 1366
1367 memset(page_address(dest), 0, PAGE_SIZE);
1368
1548 /* test for non-zero parity sum */ 1369 /* test for non-zero parity sum */
1549 op = IOAT_OP_XOR_VAL; 1370 op = IOAT_OP_XOR_VAL;
1550 1371
@@ -1554,9 +1375,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
1554 DMA_TO_DEVICE); 1375 DMA_TO_DEVICE);
1555 tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, 1376 tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
1556 IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, 1377 IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
1557 &xor_val_result, DMA_PREP_INTERRUPT | 1378 &xor_val_result, DMA_PREP_INTERRUPT);
1558 DMA_COMPL_SKIP_SRC_UNMAP |
1559 DMA_COMPL_SKIP_DEST_UNMAP);
1560 if (!tx) { 1379 if (!tx) {
1561 dev_err(dev, "Self-test 2nd zero prep failed\n"); 1380 dev_err(dev, "Self-test 2nd zero prep failed\n");
1562 err = -ENODEV; 1381 err = -ENODEV;
@@ -1577,7 +1396,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
1577 1396
1578 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 1397 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1579 1398
1580 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { 1399 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1581 dev_err(dev, "Self-test 2nd validate timed out\n"); 1400 dev_err(dev, "Self-test 2nd validate timed out\n");
1582 err = -ENODEV; 1401 err = -ENODEV;
1583 goto dma_unmap; 1402 goto dma_unmap;
@@ -1630,52 +1449,36 @@ static int ioat3_dma_self_test(struct ioatdma_device *device)
1630 1449
1631static int ioat3_irq_reinit(struct ioatdma_device *device) 1450static int ioat3_irq_reinit(struct ioatdma_device *device)
1632{ 1451{
1633 int msixcnt = device->common.chancnt;
1634 struct pci_dev *pdev = device->pdev; 1452 struct pci_dev *pdev = device->pdev;
1635 int i; 1453 int irq = pdev->irq, i;
1636 struct msix_entry *msix; 1454
1637 struct ioat_chan_common *chan; 1455 if (!is_bwd_ioat(pdev))
1638 int err = 0; 1456 return 0;
1639 1457
1640 switch (device->irq_mode) { 1458 switch (device->irq_mode) {
1641 case IOAT_MSIX: 1459 case IOAT_MSIX:
1460 for (i = 0; i < device->common.chancnt; i++) {
1461 struct msix_entry *msix = &device->msix_entries[i];
1462 struct ioat_chan_common *chan;
1642 1463
1643 for (i = 0; i < msixcnt; i++) {
1644 msix = &device->msix_entries[i];
1645 chan = ioat_chan_by_index(device, i); 1464 chan = ioat_chan_by_index(device, i);
1646 devm_free_irq(&pdev->dev, msix->vector, chan); 1465 devm_free_irq(&pdev->dev, msix->vector, chan);
1647 } 1466 }
1648 1467
1649 pci_disable_msix(pdev); 1468 pci_disable_msix(pdev);
1650 break; 1469 break;
1651
1652 case IOAT_MSIX_SINGLE:
1653 msix = &device->msix_entries[0];
1654 chan = ioat_chan_by_index(device, 0);
1655 devm_free_irq(&pdev->dev, msix->vector, chan);
1656 pci_disable_msix(pdev);
1657 break;
1658
1659 case IOAT_MSI: 1470 case IOAT_MSI:
1660 chan = ioat_chan_by_index(device, 0);
1661 devm_free_irq(&pdev->dev, pdev->irq, chan);
1662 pci_disable_msi(pdev); 1471 pci_disable_msi(pdev);
1663 break; 1472 /* fall through */
1664
1665 case IOAT_INTX: 1473 case IOAT_INTX:
1666 chan = ioat_chan_by_index(device, 0); 1474 devm_free_irq(&pdev->dev, irq, device);
1667 devm_free_irq(&pdev->dev, pdev->irq, chan);
1668 break; 1475 break;
1669
1670 default: 1476 default:
1671 return 0; 1477 return 0;
1672 } 1478 }
1673
1674 device->irq_mode = IOAT_NOIRQ; 1479 device->irq_mode = IOAT_NOIRQ;
1675 1480
1676 err = ioat_dma_setup_interrupts(device); 1481 return ioat_dma_setup_interrupts(device);
1677
1678 return err;
1679} 1482}
1680 1483
1681static int ioat3_reset_hw(struct ioat_chan_common *chan) 1484static int ioat3_reset_hw(struct ioat_chan_common *chan)
@@ -1718,14 +1521,12 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
1718 } 1521 }
1719 1522
1720 err = ioat2_reset_sync(chan, msecs_to_jiffies(200)); 1523 err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
1721 if (err) { 1524 if (!err)
1722 dev_err(&pdev->dev, "Failed to reset!\n");
1723 return err;
1724 }
1725
1726 if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev))
1727 err = ioat3_irq_reinit(device); 1525 err = ioat3_irq_reinit(device);
1728 1526
1527 if (err)
1528 dev_err(&pdev->dev, "Failed to reset: %d\n", err);
1529
1729 return err; 1530 return err;
1730} 1531}
1731 1532
@@ -1835,21 +1636,15 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
1835 char pool_name[14]; 1636 char pool_name[14];
1836 int i; 1637 int i;
1837 1638
1838 /* allocate sw descriptor pool for SED */
1839 device->sed_pool = kmem_cache_create("ioat_sed",
1840 sizeof(struct ioat_sed_ent), 0, 0, NULL);
1841 if (!device->sed_pool)
1842 return -ENOMEM;
1843
1844 for (i = 0; i < MAX_SED_POOLS; i++) { 1639 for (i = 0; i < MAX_SED_POOLS; i++) {
1845 snprintf(pool_name, 14, "ioat_hw%d_sed", i); 1640 snprintf(pool_name, 14, "ioat_hw%d_sed", i);
1846 1641
1847 /* allocate SED DMA pool */ 1642 /* allocate SED DMA pool */
1848 device->sed_hw_pool[i] = dma_pool_create(pool_name, 1643 device->sed_hw_pool[i] = dmam_pool_create(pool_name,
1849 &pdev->dev, 1644 &pdev->dev,
1850 SED_SIZE * (i + 1), 64, 0); 1645 SED_SIZE * (i + 1), 64, 0);
1851 if (!device->sed_hw_pool[i]) 1646 if (!device->sed_hw_pool[i])
1852 goto sed_pool_cleanup; 1647 return -ENOMEM;
1853 1648
1854 } 1649 }
1855 } 1650 }
@@ -1875,28 +1670,4 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
1875 device->dca = ioat3_dca_init(pdev, device->reg_base); 1670 device->dca = ioat3_dca_init(pdev, device->reg_base);
1876 1671
1877 return 0; 1672 return 0;
1878
1879sed_pool_cleanup:
1880 if (device->sed_pool) {
1881 int i;
1882 kmem_cache_destroy(device->sed_pool);
1883
1884 for (i = 0; i < MAX_SED_POOLS; i++)
1885 if (device->sed_hw_pool[i])
1886 dma_pool_destroy(device->sed_hw_pool[i]);
1887 }
1888
1889 return -ENOMEM;
1890}
1891
1892void ioat3_dma_remove(struct ioatdma_device *device)
1893{
1894 if (device->sed_pool) {
1895 int i;
1896 kmem_cache_destroy(device->sed_pool);
1897
1898 for (i = 0; i < MAX_SED_POOLS; i++)
1899 if (device->sed_hw_pool[i])
1900 dma_pool_destroy(device->sed_hw_pool[i]);
1901 }
1902} 1673}
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 2c8d560e6334..1d051cd045db 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -123,6 +123,7 @@ module_param(ioat_dca_enabled, int, 0644);
123MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); 123MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
124 124
125struct kmem_cache *ioat2_cache; 125struct kmem_cache *ioat2_cache;
126struct kmem_cache *ioat3_sed_cache;
126 127
127#define DRV_NAME "ioatdma" 128#define DRV_NAME "ioatdma"
128 129
@@ -207,9 +208,6 @@ static void ioat_remove(struct pci_dev *pdev)
207 if (!device) 208 if (!device)
208 return; 209 return;
209 210
210 if (device->version >= IOAT_VER_3_0)
211 ioat3_dma_remove(device);
212
213 dev_err(&pdev->dev, "Removing dma and dca services\n"); 211 dev_err(&pdev->dev, "Removing dma and dca services\n");
214 if (device->dca) { 212 if (device->dca) {
215 unregister_dca_provider(device->dca, &pdev->dev); 213 unregister_dca_provider(device->dca, &pdev->dev);
@@ -221,7 +219,7 @@ static void ioat_remove(struct pci_dev *pdev)
221 219
222static int __init ioat_init_module(void) 220static int __init ioat_init_module(void)
223{ 221{
224 int err; 222 int err = -ENOMEM;
225 223
226 pr_info("%s: Intel(R) QuickData Technology Driver %s\n", 224 pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
227 DRV_NAME, IOAT_DMA_VERSION); 225 DRV_NAME, IOAT_DMA_VERSION);
@@ -231,9 +229,21 @@ static int __init ioat_init_module(void)
231 if (!ioat2_cache) 229 if (!ioat2_cache)
232 return -ENOMEM; 230 return -ENOMEM;
233 231
232 ioat3_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
233 if (!ioat3_sed_cache)
234 goto err_ioat2_cache;
235
234 err = pci_register_driver(&ioat_pci_driver); 236 err = pci_register_driver(&ioat_pci_driver);
235 if (err) 237 if (err)
236 kmem_cache_destroy(ioat2_cache); 238 goto err_ioat3_cache;
239
240 return 0;
241
242 err_ioat3_cache:
243 kmem_cache_destroy(ioat3_sed_cache);
244
245 err_ioat2_cache:
246 kmem_cache_destroy(ioat2_cache);
237 247
238 return err; 248 return err;
239} 249}
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index dd8b44a56e5d..c56137bc3868 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -61,80 +61,6 @@ static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
61 } 61 }
62} 62}
63 63
64static void
65iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
66{
67 struct dma_async_tx_descriptor *tx = &desc->async_tx;
68 struct iop_adma_desc_slot *unmap = desc->group_head;
69 struct device *dev = &iop_chan->device->pdev->dev;
70 u32 len = unmap->unmap_len;
71 enum dma_ctrl_flags flags = tx->flags;
72 u32 src_cnt;
73 dma_addr_t addr;
74 dma_addr_t dest;
75
76 src_cnt = unmap->unmap_src_cnt;
77 dest = iop_desc_get_dest_addr(unmap, iop_chan);
78 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
79 enum dma_data_direction dir;
80
81 if (src_cnt > 1) /* is xor? */
82 dir = DMA_BIDIRECTIONAL;
83 else
84 dir = DMA_FROM_DEVICE;
85
86 dma_unmap_page(dev, dest, len, dir);
87 }
88
89 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
90 while (src_cnt--) {
91 addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt);
92 if (addr == dest)
93 continue;
94 dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
95 }
96 }
97 desc->group_head = NULL;
98}
99
100static void
101iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
102{
103 struct dma_async_tx_descriptor *tx = &desc->async_tx;
104 struct iop_adma_desc_slot *unmap = desc->group_head;
105 struct device *dev = &iop_chan->device->pdev->dev;
106 u32 len = unmap->unmap_len;
107 enum dma_ctrl_flags flags = tx->flags;
108 u32 src_cnt = unmap->unmap_src_cnt;
109 dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan);
110 dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan);
111 int i;
112
113 if (tx->flags & DMA_PREP_CONTINUE)
114 src_cnt -= 3;
115
116 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) {
117 dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL);
118 dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL);
119 }
120
121 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
122 dma_addr_t addr;
123
124 for (i = 0; i < src_cnt; i++) {
125 addr = iop_desc_get_src_addr(unmap, iop_chan, i);
126 dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
127 }
128 if (desc->pq_check_result) {
129 dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE);
130 dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE);
131 }
132 }
133
134 desc->group_head = NULL;
135}
136
137
138static dma_cookie_t 64static dma_cookie_t
139iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, 65iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
140 struct iop_adma_chan *iop_chan, dma_cookie_t cookie) 66 struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
@@ -152,15 +78,9 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
152 if (tx->callback) 78 if (tx->callback)
153 tx->callback(tx->callback_param); 79 tx->callback(tx->callback_param);
154 80
155 /* unmap dma addresses 81 dma_descriptor_unmap(tx);
156 * (unmap_single vs unmap_page?) 82 if (desc->group_head)
157 */ 83 desc->group_head = NULL;
158 if (desc->group_head && desc->unmap_len) {
159 if (iop_desc_is_pq(desc))
160 iop_desc_unmap_pq(iop_chan, desc);
161 else
162 iop_desc_unmap(iop_chan, desc);
163 }
164 } 84 }
165 85
166 /* run dependent operations */ 86 /* run dependent operations */
@@ -591,7 +511,6 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
591 if (sw_desc) { 511 if (sw_desc) {
592 grp_start = sw_desc->group_head; 512 grp_start = sw_desc->group_head;
593 iop_desc_init_interrupt(grp_start, iop_chan); 513 iop_desc_init_interrupt(grp_start, iop_chan);
594 grp_start->unmap_len = 0;
595 sw_desc->async_tx.flags = flags; 514 sw_desc->async_tx.flags = flags;
596 } 515 }
597 spin_unlock_bh(&iop_chan->lock); 516 spin_unlock_bh(&iop_chan->lock);
@@ -623,8 +542,6 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
623 iop_desc_set_byte_count(grp_start, iop_chan, len); 542 iop_desc_set_byte_count(grp_start, iop_chan, len);
624 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); 543 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
625 iop_desc_set_memcpy_src_addr(grp_start, dma_src); 544 iop_desc_set_memcpy_src_addr(grp_start, dma_src);
626 sw_desc->unmap_src_cnt = 1;
627 sw_desc->unmap_len = len;
628 sw_desc->async_tx.flags = flags; 545 sw_desc->async_tx.flags = flags;
629 } 546 }
630 spin_unlock_bh(&iop_chan->lock); 547 spin_unlock_bh(&iop_chan->lock);
@@ -657,8 +574,6 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
657 iop_desc_init_xor(grp_start, src_cnt, flags); 574 iop_desc_init_xor(grp_start, src_cnt, flags);
658 iop_desc_set_byte_count(grp_start, iop_chan, len); 575 iop_desc_set_byte_count(grp_start, iop_chan, len);
659 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); 576 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
660 sw_desc->unmap_src_cnt = src_cnt;
661 sw_desc->unmap_len = len;
662 sw_desc->async_tx.flags = flags; 577 sw_desc->async_tx.flags = flags;
663 while (src_cnt--) 578 while (src_cnt--)
664 iop_desc_set_xor_src_addr(grp_start, src_cnt, 579 iop_desc_set_xor_src_addr(grp_start, src_cnt,
@@ -694,8 +609,6 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
694 grp_start->xor_check_result = result; 609 grp_start->xor_check_result = result;
695 pr_debug("\t%s: grp_start->xor_check_result: %p\n", 610 pr_debug("\t%s: grp_start->xor_check_result: %p\n",
696 __func__, grp_start->xor_check_result); 611 __func__, grp_start->xor_check_result);
697 sw_desc->unmap_src_cnt = src_cnt;
698 sw_desc->unmap_len = len;
699 sw_desc->async_tx.flags = flags; 612 sw_desc->async_tx.flags = flags;
700 while (src_cnt--) 613 while (src_cnt--)
701 iop_desc_set_zero_sum_src_addr(grp_start, src_cnt, 614 iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
@@ -748,8 +661,6 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
748 dst[0] = dst[1] & 0x7; 661 dst[0] = dst[1] & 0x7;
749 662
750 iop_desc_set_pq_addr(g, dst); 663 iop_desc_set_pq_addr(g, dst);
751 sw_desc->unmap_src_cnt = src_cnt;
752 sw_desc->unmap_len = len;
753 sw_desc->async_tx.flags = flags; 664 sw_desc->async_tx.flags = flags;
754 for (i = 0; i < src_cnt; i++) 665 for (i = 0; i < src_cnt; i++)
755 iop_desc_set_pq_src_addr(g, i, src[i], scf[i]); 666 iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);
@@ -804,8 +715,6 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
804 g->pq_check_result = pqres; 715 g->pq_check_result = pqres;
805 pr_debug("\t%s: g->pq_check_result: %p\n", 716 pr_debug("\t%s: g->pq_check_result: %p\n",
806 __func__, g->pq_check_result); 717 __func__, g->pq_check_result);
807 sw_desc->unmap_src_cnt = src_cnt+2;
808 sw_desc->unmap_len = len;
809 sw_desc->async_tx.flags = flags; 718 sw_desc->async_tx.flags = flags;
810 while (src_cnt--) 719 while (src_cnt--)
811 iop_desc_set_pq_zero_sum_src_addr(g, src_cnt, 720 iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
@@ -864,7 +773,7 @@ static enum dma_status iop_adma_status(struct dma_chan *chan,
864 int ret; 773 int ret;
865 774
866 ret = dma_cookie_status(chan, cookie, txstate); 775 ret = dma_cookie_status(chan, cookie, txstate);
867 if (ret == DMA_SUCCESS) 776 if (ret == DMA_COMPLETE)
868 return ret; 777 return ret;
869 778
870 iop_adma_slot_cleanup(iop_chan); 779 iop_adma_slot_cleanup(iop_chan);
@@ -983,7 +892,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
983 msleep(1); 892 msleep(1);
984 893
985 if (iop_adma_status(dma_chan, cookie, NULL) != 894 if (iop_adma_status(dma_chan, cookie, NULL) !=
986 DMA_SUCCESS) { 895 DMA_COMPLETE) {
987 dev_err(dma_chan->device->dev, 896 dev_err(dma_chan->device->dev,
988 "Self-test copy timed out, disabling\n"); 897 "Self-test copy timed out, disabling\n");
989 err = -ENODEV; 898 err = -ENODEV;
@@ -1083,7 +992,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
1083 msleep(8); 992 msleep(8);
1084 993
1085 if (iop_adma_status(dma_chan, cookie, NULL) != 994 if (iop_adma_status(dma_chan, cookie, NULL) !=
1086 DMA_SUCCESS) { 995 DMA_COMPLETE) {
1087 dev_err(dma_chan->device->dev, 996 dev_err(dma_chan->device->dev,
1088 "Self-test xor timed out, disabling\n"); 997 "Self-test xor timed out, disabling\n");
1089 err = -ENODEV; 998 err = -ENODEV;
@@ -1129,7 +1038,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
1129 iop_adma_issue_pending(dma_chan); 1038 iop_adma_issue_pending(dma_chan);
1130 msleep(8); 1039 msleep(8);
1131 1040
1132 if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { 1041 if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1133 dev_err(dma_chan->device->dev, 1042 dev_err(dma_chan->device->dev,
1134 "Self-test zero sum timed out, disabling\n"); 1043 "Self-test zero sum timed out, disabling\n");
1135 err = -ENODEV; 1044 err = -ENODEV;
@@ -1158,7 +1067,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
1158 iop_adma_issue_pending(dma_chan); 1067 iop_adma_issue_pending(dma_chan);
1159 msleep(8); 1068 msleep(8);
1160 1069
1161 if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { 1070 if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1162 dev_err(dma_chan->device->dev, 1071 dev_err(dma_chan->device->dev,
1163 "Self-test non-zero sum timed out, disabling\n"); 1072 "Self-test non-zero sum timed out, disabling\n");
1164 err = -ENODEV; 1073 err = -ENODEV;
@@ -1254,7 +1163,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
1254 msleep(8); 1163 msleep(8);
1255 1164
1256 if (iop_adma_status(dma_chan, cookie, NULL) != 1165 if (iop_adma_status(dma_chan, cookie, NULL) !=
1257 DMA_SUCCESS) { 1166 DMA_COMPLETE) {
1258 dev_err(dev, "Self-test pq timed out, disabling\n"); 1167 dev_err(dev, "Self-test pq timed out, disabling\n");
1259 err = -ENODEV; 1168 err = -ENODEV;
1260 goto free_resources; 1169 goto free_resources;
@@ -1291,7 +1200,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
1291 msleep(8); 1200 msleep(8);
1292 1201
1293 if (iop_adma_status(dma_chan, cookie, NULL) != 1202 if (iop_adma_status(dma_chan, cookie, NULL) !=
1294 DMA_SUCCESS) { 1203 DMA_COMPLETE) {
1295 dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n"); 1204 dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
1296 err = -ENODEV; 1205 err = -ENODEV;
1297 goto free_resources; 1206 goto free_resources;
@@ -1323,7 +1232,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
1323 msleep(8); 1232 msleep(8);
1324 1233
1325 if (iop_adma_status(dma_chan, cookie, NULL) != 1234 if (iop_adma_status(dma_chan, cookie, NULL) !=
1326 DMA_SUCCESS) { 1235 DMA_COMPLETE) {
1327 dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n"); 1236 dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
1328 err = -ENODEV; 1237 err = -ENODEV;
1329 goto free_resources; 1238 goto free_resources;
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index cb9c0bc317e8..128ca143486d 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1232,8 +1232,10 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1232 desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); 1232 desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
1233 descnew = desc; 1233 descnew = desc;
1234 1234
1235 dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n", 1235 dev_dbg(dev, "IDMAC irq %d, dma %#llx, next dma %#llx, current %d, curbuf %#x\n",
1236 irq, sg_dma_address(*sg), sgnext ? sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf); 1236 irq, (u64)sg_dma_address(*sg),
1237 sgnext ? (u64)sg_dma_address(sgnext) : 0,
1238 ichan->active_buffer, curbuf);
1237 1239
1238 /* Find the descriptor of sgnext */ 1240 /* Find the descriptor of sgnext */
1239 sgnew = idmac_sg_next(ichan, &descnew, *sg); 1241 sgnew = idmac_sg_next(ichan, &descnew, *sg);
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index a2c330f5f952..e26075408e9b 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -344,7 +344,7 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
344 size_t bytes = 0; 344 size_t bytes = 0;
345 345
346 ret = dma_cookie_status(&c->vc.chan, cookie, state); 346 ret = dma_cookie_status(&c->vc.chan, cookie, state);
347 if (ret == DMA_SUCCESS) 347 if (ret == DMA_COMPLETE)
348 return ret; 348 return ret;
349 349
350 spin_lock_irqsave(&c->vc.lock, flags); 350 spin_lock_irqsave(&c->vc.lock, flags);
@@ -693,7 +693,7 @@ static int k3_dma_probe(struct platform_device *op)
693 693
694 irq = platform_get_irq(op, 0); 694 irq = platform_get_irq(op, 0);
695 ret = devm_request_irq(&op->dev, irq, 695 ret = devm_request_irq(&op->dev, irq,
696 k3_dma_int_handler, IRQF_DISABLED, DRIVER_NAME, d); 696 k3_dma_int_handler, 0, DRIVER_NAME, d);
697 if (ret) 697 if (ret)
698 return ret; 698 return ret;
699 699
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index ff8d7827f8cb..8869500ab92b 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -798,8 +798,7 @@ static void dma_do_tasklet(unsigned long data)
798 * move the descriptors to a temporary list so we can drop 798 * move the descriptors to a temporary list so we can drop
799 * the lock during the entire cleanup operation 799 * the lock during the entire cleanup operation
800 */ 800 */
801 list_del(&desc->node); 801 list_move(&desc->node, &chain_cleanup);
802 list_add(&desc->node, &chain_cleanup);
803 802
804 /* 803 /*
805 * Look for the first list entry which has the ENDIRQEN flag 804 * Look for the first list entry which has the ENDIRQEN flag
@@ -863,7 +862,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
863 862
864 if (irq) { 863 if (irq) {
865 ret = devm_request_irq(pdev->dev, irq, 864 ret = devm_request_irq(pdev->dev, irq,
866 mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy); 865 mmp_pdma_chan_handler, 0, "pdma", phy);
867 if (ret) { 866 if (ret) {
868 dev_err(pdev->dev, "channel request irq fail!\n"); 867 dev_err(pdev->dev, "channel request irq fail!\n");
869 return ret; 868 return ret;
@@ -970,7 +969,7 @@ static int mmp_pdma_probe(struct platform_device *op)
970 /* all chan share one irq, demux inside */ 969 /* all chan share one irq, demux inside */
971 irq = platform_get_irq(op, 0); 970 irq = platform_get_irq(op, 0);
972 ret = devm_request_irq(pdev->dev, irq, 971 ret = devm_request_irq(pdev->dev, irq,
973 mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev); 972 mmp_pdma_int_handler, 0, "pdma", pdev);
974 if (ret) 973 if (ret)
975 return ret; 974 return ret;
976 } 975 }
@@ -1018,6 +1017,7 @@ static int mmp_pdma_probe(struct platform_device *op)
1018 } 1017 }
1019 } 1018 }
1020 1019
1020 platform_set_drvdata(op, pdev);
1021 dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels); 1021 dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
1022 return 0; 1022 return 0;
1023} 1023}
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index d3b6358e5a27..3ddacc14a736 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -62,6 +62,11 @@
62#define TDCR_BURSTSZ_16B (0x3 << 6) 62#define TDCR_BURSTSZ_16B (0x3 << 6)
63#define TDCR_BURSTSZ_32B (0x6 << 6) 63#define TDCR_BURSTSZ_32B (0x6 << 6)
64#define TDCR_BURSTSZ_64B (0x7 << 6) 64#define TDCR_BURSTSZ_64B (0x7 << 6)
65#define TDCR_BURSTSZ_SQU_1B (0x5 << 6)
66#define TDCR_BURSTSZ_SQU_2B (0x6 << 6)
67#define TDCR_BURSTSZ_SQU_4B (0x0 << 6)
68#define TDCR_BURSTSZ_SQU_8B (0x1 << 6)
69#define TDCR_BURSTSZ_SQU_16B (0x3 << 6)
65#define TDCR_BURSTSZ_SQU_32B (0x7 << 6) 70#define TDCR_BURSTSZ_SQU_32B (0x7 << 6)
66#define TDCR_BURSTSZ_128B (0x5 << 6) 71#define TDCR_BURSTSZ_128B (0x5 << 6)
67#define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */ 72#define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */
@@ -158,7 +163,7 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac)
158 /* disable irq */ 163 /* disable irq */
159 writel(0, tdmac->reg_base + TDIMR); 164 writel(0, tdmac->reg_base + TDIMR);
160 165
161 tdmac->status = DMA_SUCCESS; 166 tdmac->status = DMA_COMPLETE;
162} 167}
163 168
164static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac) 169static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac)
@@ -228,8 +233,31 @@ static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
228 return -EINVAL; 233 return -EINVAL;
229 } 234 }
230 } else if (tdmac->type == PXA910_SQU) { 235 } else if (tdmac->type == PXA910_SQU) {
231 tdcr |= TDCR_BURSTSZ_SQU_32B;
232 tdcr |= TDCR_SSPMOD; 236 tdcr |= TDCR_SSPMOD;
237
238 switch (tdmac->burst_sz) {
239 case 1:
240 tdcr |= TDCR_BURSTSZ_SQU_1B;
241 break;
242 case 2:
243 tdcr |= TDCR_BURSTSZ_SQU_2B;
244 break;
245 case 4:
246 tdcr |= TDCR_BURSTSZ_SQU_4B;
247 break;
248 case 8:
249 tdcr |= TDCR_BURSTSZ_SQU_8B;
250 break;
251 case 16:
252 tdcr |= TDCR_BURSTSZ_SQU_16B;
253 break;
254 case 32:
255 tdcr |= TDCR_BURSTSZ_SQU_32B;
256 break;
257 default:
258 dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
259 return -EINVAL;
260 }
233 } 261 }
234 262
235 writel(tdcr, tdmac->reg_base + TDCR); 263 writel(tdcr, tdmac->reg_base + TDCR);
@@ -324,7 +352,7 @@ static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan)
324 352
325 if (tdmac->irq) { 353 if (tdmac->irq) {
326 ret = devm_request_irq(tdmac->dev, tdmac->irq, 354 ret = devm_request_irq(tdmac->dev, tdmac->irq,
327 mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac); 355 mmp_tdma_chan_handler, 0, "tdma", tdmac);
328 if (ret) 356 if (ret)
329 return ret; 357 return ret;
330 } 358 }
@@ -365,7 +393,7 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
365 int num_periods = buf_len / period_len; 393 int num_periods = buf_len / period_len;
366 int i = 0, buf = 0; 394 int i = 0, buf = 0;
367 395
368 if (tdmac->status != DMA_SUCCESS) 396 if (tdmac->status != DMA_COMPLETE)
369 return NULL; 397 return NULL;
370 398
371 if (period_len > TDMA_MAX_XFER_BYTES) { 399 if (period_len > TDMA_MAX_XFER_BYTES) {
@@ -499,7 +527,7 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
499 tdmac->idx = idx; 527 tdmac->idx = idx;
500 tdmac->type = type; 528 tdmac->type = type;
501 tdmac->reg_base = (unsigned long)tdev->base + idx * 4; 529 tdmac->reg_base = (unsigned long)tdev->base + idx * 4;
502 tdmac->status = DMA_SUCCESS; 530 tdmac->status = DMA_COMPLETE;
503 tdev->tdmac[tdmac->idx] = tdmac; 531 tdev->tdmac[tdmac->idx] = tdmac;
504 tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac); 532 tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
505 533
@@ -554,7 +582,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
554 if (irq_num != chan_num) { 582 if (irq_num != chan_num) {
555 irq = platform_get_irq(pdev, 0); 583 irq = platform_get_irq(pdev, 0);
556 ret = devm_request_irq(&pdev->dev, irq, 584 ret = devm_request_irq(&pdev->dev, irq,
557 mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev); 585 mmp_tdma_int_handler, 0, "tdma", tdev);
558 if (ret) 586 if (ret)
559 return ret; 587 return ret;
560 } 588 }
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 536dcb8ba5fd..7807f0ef4e20 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -60,14 +60,6 @@ static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
60 return hw_desc->phy_dest_addr; 60 return hw_desc->phy_dest_addr;
61} 61}
62 62
63static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
64 int src_idx)
65{
66 struct mv_xor_desc *hw_desc = desc->hw_desc;
67 return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
68}
69
70
71static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, 63static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
72 u32 byte_count) 64 u32 byte_count)
73{ 65{
@@ -278,42 +270,9 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
278 desc->async_tx.callback( 270 desc->async_tx.callback(
279 desc->async_tx.callback_param); 271 desc->async_tx.callback_param);
280 272
281 /* unmap dma addresses 273 dma_descriptor_unmap(&desc->async_tx);
282 * (unmap_single vs unmap_page?) 274 if (desc->group_head)
283 */
284 if (desc->group_head && desc->unmap_len) {
285 struct mv_xor_desc_slot *unmap = desc->group_head;
286 struct device *dev = mv_chan_to_devp(mv_chan);
287 u32 len = unmap->unmap_len;
288 enum dma_ctrl_flags flags = desc->async_tx.flags;
289 u32 src_cnt;
290 dma_addr_t addr;
291 dma_addr_t dest;
292
293 src_cnt = unmap->unmap_src_cnt;
294 dest = mv_desc_get_dest_addr(unmap);
295 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
296 enum dma_data_direction dir;
297
298 if (src_cnt > 1) /* is xor ? */
299 dir = DMA_BIDIRECTIONAL;
300 else
301 dir = DMA_FROM_DEVICE;
302 dma_unmap_page(dev, dest, len, dir);
303 }
304
305 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
306 while (src_cnt--) {
307 addr = mv_desc_get_src_addr(unmap,
308 src_cnt);
309 if (addr == dest)
310 continue;
311 dma_unmap_page(dev, addr, len,
312 DMA_TO_DEVICE);
313 }
314 }
315 desc->group_head = NULL; 275 desc->group_head = NULL;
316 }
317 } 276 }
318 277
319 /* run dependent operations */ 278 /* run dependent operations */
@@ -749,7 +708,7 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
749 enum dma_status ret; 708 enum dma_status ret;
750 709
751 ret = dma_cookie_status(chan, cookie, txstate); 710 ret = dma_cookie_status(chan, cookie, txstate);
752 if (ret == DMA_SUCCESS) { 711 if (ret == DMA_COMPLETE) {
753 mv_xor_clean_completed_slots(mv_chan); 712 mv_xor_clean_completed_slots(mv_chan);
754 return ret; 713 return ret;
755 } 714 }
@@ -874,7 +833,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
874 msleep(1); 833 msleep(1);
875 834
876 if (mv_xor_status(dma_chan, cookie, NULL) != 835 if (mv_xor_status(dma_chan, cookie, NULL) !=
877 DMA_SUCCESS) { 836 DMA_COMPLETE) {
878 dev_err(dma_chan->device->dev, 837 dev_err(dma_chan->device->dev,
879 "Self-test copy timed out, disabling\n"); 838 "Self-test copy timed out, disabling\n");
880 err = -ENODEV; 839 err = -ENODEV;
@@ -968,7 +927,7 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
968 msleep(8); 927 msleep(8);
969 928
970 if (mv_xor_status(dma_chan, cookie, NULL) != 929 if (mv_xor_status(dma_chan, cookie, NULL) !=
971 DMA_SUCCESS) { 930 DMA_COMPLETE) {
972 dev_err(dma_chan->device->dev, 931 dev_err(dma_chan->device->dev,
973 "Self-test xor timed out, disabling\n"); 932 "Self-test xor timed out, disabling\n");
974 err = -ENODEV; 933 err = -ENODEV;
@@ -1076,10 +1035,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
1076 } 1035 }
1077 1036
1078 mv_chan->mmr_base = xordev->xor_base; 1037 mv_chan->mmr_base = xordev->xor_base;
1079 if (!mv_chan->mmr_base) { 1038 mv_chan->mmr_high_base = xordev->xor_high_base;
1080 ret = -ENOMEM;
1081 goto err_free_dma;
1082 }
1083 tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long) 1039 tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1084 mv_chan); 1040 mv_chan);
1085 1041
@@ -1138,7 +1094,7 @@ static void
1138mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, 1094mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
1139 const struct mbus_dram_target_info *dram) 1095 const struct mbus_dram_target_info *dram)
1140{ 1096{
1141 void __iomem *base = xordev->xor_base; 1097 void __iomem *base = xordev->xor_high_base;
1142 u32 win_enable = 0; 1098 u32 win_enable = 0;
1143 int i; 1099 int i;
1144 1100
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 06b067f24c9b..d0749229c875 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -34,13 +34,13 @@
34#define XOR_OPERATION_MODE_MEMCPY 2 34#define XOR_OPERATION_MODE_MEMCPY 2
35#define XOR_DESCRIPTOR_SWAP BIT(14) 35#define XOR_DESCRIPTOR_SWAP BIT(14)
36 36
37#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4)) 37#define XOR_CURR_DESC(chan) (chan->mmr_high_base + 0x10 + (chan->idx * 4))
38#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4)) 38#define XOR_NEXT_DESC(chan) (chan->mmr_high_base + 0x00 + (chan->idx * 4))
39#define XOR_BYTE_COUNT(chan) (chan->mmr_base + 0x220 + (chan->idx * 4)) 39#define XOR_BYTE_COUNT(chan) (chan->mmr_high_base + 0x20 + (chan->idx * 4))
40#define XOR_DEST_POINTER(chan) (chan->mmr_base + 0x2B0 + (chan->idx * 4)) 40#define XOR_DEST_POINTER(chan) (chan->mmr_high_base + 0xB0 + (chan->idx * 4))
41#define XOR_BLOCK_SIZE(chan) (chan->mmr_base + 0x2C0 + (chan->idx * 4)) 41#define XOR_BLOCK_SIZE(chan) (chan->mmr_high_base + 0xC0 + (chan->idx * 4))
42#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_base + 0x2E0) 42#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_high_base + 0xE0)
43#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_base + 0x2E4) 43#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_high_base + 0xE4)
44 44
45#define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4)) 45#define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4))
46#define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4)) 46#define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4))
@@ -50,11 +50,11 @@
50#define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60) 50#define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60)
51#define XOR_INTR_MASK_VALUE 0x3F5 51#define XOR_INTR_MASK_VALUE 0x3F5
52 52
53#define WINDOW_BASE(w) (0x250 + ((w) << 2)) 53#define WINDOW_BASE(w) (0x50 + ((w) << 2))
54#define WINDOW_SIZE(w) (0x270 + ((w) << 2)) 54#define WINDOW_SIZE(w) (0x70 + ((w) << 2))
55#define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2)) 55#define WINDOW_REMAP_HIGH(w) (0x90 + ((w) << 2))
56#define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2)) 56#define WINDOW_BAR_ENABLE(chan) (0x40 + ((chan) << 2))
57#define WINDOW_OVERRIDE_CTRL(chan) (0x2A0 + ((chan) << 2)) 57#define WINDOW_OVERRIDE_CTRL(chan) (0xA0 + ((chan) << 2))
58 58
59struct mv_xor_device { 59struct mv_xor_device {
60 void __iomem *xor_base; 60 void __iomem *xor_base;
@@ -82,6 +82,7 @@ struct mv_xor_chan {
82 int pending; 82 int pending;
83 spinlock_t lock; /* protects the descriptor slot pool */ 83 spinlock_t lock; /* protects the descriptor slot pool */
84 void __iomem *mmr_base; 84 void __iomem *mmr_base;
85 void __iomem *mmr_high_base;
85 unsigned int idx; 86 unsigned int idx;
86 int irq; 87 int irq;
87 enum dma_transaction_type current_type; 88 enum dma_transaction_type current_type;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index ccd13df841db..ead491346da7 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -27,6 +27,7 @@
27#include <linux/of.h> 27#include <linux/of.h>
28#include <linux/of_device.h> 28#include <linux/of_device.h>
29#include <linux/of_dma.h> 29#include <linux/of_dma.h>
30#include <linux/list.h>
30 31
31#include <asm/irq.h> 32#include <asm/irq.h>
32 33
@@ -57,6 +58,9 @@
57 (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70) 58 (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
58#define HW_APBHX_CHn_SEMA(d, n) \ 59#define HW_APBHX_CHn_SEMA(d, n) \
59 (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70) 60 (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)
61#define HW_APBHX_CHn_BAR(d, n) \
62 (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x070 : 0x130) + (n) * 0x70)
63#define HW_APBX_CHn_DEBUG1(d, n) (0x150 + (n) * 0x70)
60 64
61/* 65/*
62 * ccw bits definitions 66 * ccw bits definitions
@@ -115,7 +119,9 @@ struct mxs_dma_chan {
115 int desc_count; 119 int desc_count;
116 enum dma_status status; 120 enum dma_status status;
117 unsigned int flags; 121 unsigned int flags;
122 bool reset;
118#define MXS_DMA_SG_LOOP (1 << 0) 123#define MXS_DMA_SG_LOOP (1 << 0)
124#define MXS_DMA_USE_SEMAPHORE (1 << 1)
119}; 125};
120 126
121#define MXS_DMA_CHANNELS 16 127#define MXS_DMA_CHANNELS 16
@@ -201,12 +207,47 @@ static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
201 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 207 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
202 int chan_id = mxs_chan->chan.chan_id; 208 int chan_id = mxs_chan->chan.chan_id;
203 209
204 if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) 210 /*
211 * mxs dma channel resets can cause a channel stall. To recover from a
212 * channel stall, we have to reset the whole DMA engine. To avoid this,
213 * we use cyclic DMA with semaphores, that are enhanced in
214 * mxs_dma_int_handler. To reset the channel, we can simply stop writing
215 * into the semaphore counter.
216 */
217 if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
218 mxs_chan->flags & MXS_DMA_SG_LOOP) {
219 mxs_chan->reset = true;
220 } else if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) {
205 writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL), 221 writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
206 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); 222 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
207 else 223 } else {
224 unsigned long elapsed = 0;
225 const unsigned long max_wait = 50000; /* 50ms */
226 void __iomem *reg_dbg1 = mxs_dma->base +
227 HW_APBX_CHn_DEBUG1(mxs_dma, chan_id);
228
229 /*
230 * On i.MX28 APBX, the DMA channel can stop working if we reset
231 * the channel while it is in READ_FLUSH (0x08) state.
232 * We wait here until we leave the state. Then we trigger the
233 * reset. Waiting a maximum of 50ms, the kernel shouldn't crash
234 * because of this.
235 */
236 while ((readl(reg_dbg1) & 0xf) == 0x8 && elapsed < max_wait) {
237 udelay(100);
238 elapsed += 100;
239 }
240
241 if (elapsed >= max_wait)
242 dev_err(&mxs_chan->mxs_dma->pdev->dev,
243 "Failed waiting for the DMA channel %d to leave state READ_FLUSH, trying to reset channel in READ_FLUSH state now\n",
244 chan_id);
245
208 writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL), 246 writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
209 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); 247 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
248 }
249
250 mxs_chan->status = DMA_COMPLETE;
210} 251}
211 252
212static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) 253static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
@@ -219,12 +260,21 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
219 mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id)); 260 mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));
220 261
221 /* write 1 to SEMA to kick off the channel */ 262 /* write 1 to SEMA to kick off the channel */
222 writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); 263 if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
264 mxs_chan->flags & MXS_DMA_SG_LOOP) {
265 /* A cyclic DMA consists of at least 2 segments, so initialize
266 * the semaphore with 2 so we have enough time to add 1 to the
267 * semaphore if we need to */
268 writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
269 } else {
270 writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
271 }
272 mxs_chan->reset = false;
223} 273}
224 274
225static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) 275static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
226{ 276{
227 mxs_chan->status = DMA_SUCCESS; 277 mxs_chan->status = DMA_COMPLETE;
228} 278}
229 279
230static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) 280static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
@@ -272,58 +322,88 @@ static void mxs_dma_tasklet(unsigned long data)
272 mxs_chan->desc.callback(mxs_chan->desc.callback_param); 322 mxs_chan->desc.callback(mxs_chan->desc.callback_param);
273} 323}
274 324
325static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq)
326{
327 int i;
328
329 for (i = 0; i != mxs_dma->nr_channels; ++i)
330 if (mxs_dma->mxs_chans[i].chan_irq == irq)
331 return i;
332
333 return -EINVAL;
334}
335
275static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) 336static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
276{ 337{
277 struct mxs_dma_engine *mxs_dma = dev_id; 338 struct mxs_dma_engine *mxs_dma = dev_id;
278 u32 stat1, stat2; 339 struct mxs_dma_chan *mxs_chan;
340 u32 completed;
341 u32 err;
342 int chan = mxs_dma_irq_to_chan(mxs_dma, irq);
343
344 if (chan < 0)
345 return IRQ_NONE;
279 346
280 /* completion status */ 347 /* completion status */
281 stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1); 348 completed = readl(mxs_dma->base + HW_APBHX_CTRL1);
282 stat1 &= MXS_DMA_CHANNELS_MASK; 349 completed = (completed >> chan) & 0x1;
283 writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR); 350
351 /* Clear interrupt */
352 writel((1 << chan),
353 mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
284 354
285 /* error status */ 355 /* error status */
286 stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2); 356 err = readl(mxs_dma->base + HW_APBHX_CTRL2);
287 writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR); 357 err &= (1 << (MXS_DMA_CHANNELS + chan)) | (1 << chan);
358
359 /*
360 * error status bit is in the upper 16 bits, error irq bit in the lower
361 * 16 bits. We transform it into a simpler error code:
362 * err: 0x00 = no error, 0x01 = TERMINATION, 0x02 = BUS_ERROR
363 */
364 err = (err >> (MXS_DMA_CHANNELS + chan)) + (err >> chan);
365
366 /* Clear error irq */
367 writel((1 << chan),
368 mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);
288 369
289 /* 370 /*
290 * When both completion and error of termination bits set at the 371 * When both completion and error of termination bits set at the
291 * same time, we do not take it as an error. IOW, it only becomes 372 * same time, we do not take it as an error. IOW, it only becomes
292 * an error we need to handle here in case of either it's (1) a bus 373 * an error we need to handle here in case of either it's a bus
293 * error or (2) a termination error with no completion. 374 * error or a termination error with no completion. 0x01 is termination
375 * error, so we can subtract err & completed to get the real error case.
294 */ 376 */
295 stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ 377 err -= err & completed;
296 (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */
297
298 /* combine error and completion status for checking */
299 stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1;
300 while (stat1) {
301 int channel = fls(stat1) - 1;
302 struct mxs_dma_chan *mxs_chan =
303 &mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS];
304
305 if (channel >= MXS_DMA_CHANNELS) {
306 dev_dbg(mxs_dma->dma_device.dev,
307 "%s: error in channel %d\n", __func__,
308 channel - MXS_DMA_CHANNELS);
309 mxs_chan->status = DMA_ERROR;
310 mxs_dma_reset_chan(mxs_chan);
311 } else {
312 if (mxs_chan->flags & MXS_DMA_SG_LOOP)
313 mxs_chan->status = DMA_IN_PROGRESS;
314 else
315 mxs_chan->status = DMA_SUCCESS;
316 }
317 378
318 stat1 &= ~(1 << channel); 379 mxs_chan = &mxs_dma->mxs_chans[chan];
319 380
320 if (mxs_chan->status == DMA_SUCCESS) 381 if (err) {
321 dma_cookie_complete(&mxs_chan->desc); 382 dev_dbg(mxs_dma->dma_device.dev,
383 "%s: error in channel %d\n", __func__,
384 chan);
385 mxs_chan->status = DMA_ERROR;
386 mxs_dma_reset_chan(mxs_chan);
387 } else if (mxs_chan->status != DMA_COMPLETE) {
388 if (mxs_chan->flags & MXS_DMA_SG_LOOP) {
389 mxs_chan->status = DMA_IN_PROGRESS;
390 if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE)
391 writel(1, mxs_dma->base +
392 HW_APBHX_CHn_SEMA(mxs_dma, chan));
393 } else {
394 mxs_chan->status = DMA_COMPLETE;
395 }
396 }
322 397
323 /* schedule tasklet on this channel */ 398 if (mxs_chan->status == DMA_COMPLETE) {
324 tasklet_schedule(&mxs_chan->tasklet); 399 if (mxs_chan->reset)
400 return IRQ_HANDLED;
401 dma_cookie_complete(&mxs_chan->desc);
325 } 402 }
326 403
404 /* schedule tasklet on this channel */
405 tasklet_schedule(&mxs_chan->tasklet);
406
327 return IRQ_HANDLED; 407 return IRQ_HANDLED;
328} 408}
329 409
@@ -523,6 +603,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
523 603
524 mxs_chan->status = DMA_IN_PROGRESS; 604 mxs_chan->status = DMA_IN_PROGRESS;
525 mxs_chan->flags |= MXS_DMA_SG_LOOP; 605 mxs_chan->flags |= MXS_DMA_SG_LOOP;
606 mxs_chan->flags |= MXS_DMA_USE_SEMAPHORE;
526 607
527 if (num_periods > NUM_CCW) { 608 if (num_periods > NUM_CCW) {
528 dev_err(mxs_dma->dma_device.dev, 609 dev_err(mxs_dma->dma_device.dev,
@@ -554,6 +635,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
554 ccw->bits |= CCW_IRQ; 635 ccw->bits |= CCW_IRQ;
555 ccw->bits |= CCW_HALT_ON_TERM; 636 ccw->bits |= CCW_HALT_ON_TERM;
556 ccw->bits |= CCW_TERM_FLUSH; 637 ccw->bits |= CCW_TERM_FLUSH;
638 ccw->bits |= CCW_DEC_SEM;
557 ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? 639 ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
558 MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); 640 MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
559 641
@@ -599,8 +681,24 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
599 dma_cookie_t cookie, struct dma_tx_state *txstate) 681 dma_cookie_t cookie, struct dma_tx_state *txstate)
600{ 682{
601 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 683 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
684 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
685 u32 residue = 0;
686
687 if (mxs_chan->status == DMA_IN_PROGRESS &&
688 mxs_chan->flags & MXS_DMA_SG_LOOP) {
689 struct mxs_dma_ccw *last_ccw;
690 u32 bar;
691
692 last_ccw = &mxs_chan->ccw[mxs_chan->desc_count - 1];
693 residue = last_ccw->xfer_bytes + last_ccw->bufaddr;
694
695 bar = readl(mxs_dma->base +
696 HW_APBHX_CHn_BAR(mxs_dma, chan->chan_id));
697 residue -= bar;
698 }
602 699
603 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0); 700 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
701 residue);
604 702
605 return mxs_chan->status; 703 return mxs_chan->status;
606} 704}
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index ec3fc4fd9160..2f66cf4e54fe 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -248,7 +248,7 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
248 unsigned long flags; 248 unsigned long flags;
249 249
250 ret = dma_cookie_status(chan, cookie, txstate); 250 ret = dma_cookie_status(chan, cookie, txstate);
251 if (ret == DMA_SUCCESS || !txstate) 251 if (ret == DMA_COMPLETE || !txstate)
252 return ret; 252 return ret;
253 253
254 spin_lock_irqsave(&c->vc.lock, flags); 254 spin_lock_irqsave(&c->vc.lock, flags);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index df8b10fd1726..cdf0483b8f2d 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2268,6 +2268,8 @@ static void pl330_tasklet(unsigned long data)
2268 list_move_tail(&desc->node, &pch->dmac->desc_pool); 2268 list_move_tail(&desc->node, &pch->dmac->desc_pool);
2269 } 2269 }
2270 2270
2271 dma_descriptor_unmap(&desc->txd);
2272
2271 if (callback) { 2273 if (callback) {
2272 spin_unlock_irqrestore(&pch->lock, flags); 2274 spin_unlock_irqrestore(&pch->lock, flags);
2273 callback(callback_param); 2275 callback(callback_param);
@@ -2314,7 +2316,7 @@ bool pl330_filter(struct dma_chan *chan, void *param)
2314 return false; 2316 return false;
2315 2317
2316 peri_id = chan->private; 2318 peri_id = chan->private;
2317 return *peri_id == (unsigned)param; 2319 return *peri_id == (unsigned long)param;
2318} 2320}
2319EXPORT_SYMBOL(pl330_filter); 2321EXPORT_SYMBOL(pl330_filter);
2320 2322
@@ -2926,16 +2928,23 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2926 2928
2927 amba_set_drvdata(adev, pdmac); 2929 amba_set_drvdata(adev, pdmac);
2928 2930
2929 irq = adev->irq[0]; 2931 for (i = 0; i < AMBA_NR_IRQS; i++) {
2930 ret = request_irq(irq, pl330_irq_handler, 0, 2932 irq = adev->irq[i];
2931 dev_name(&adev->dev), pi); 2933 if (irq) {
2932 if (ret) 2934 ret = devm_request_irq(&adev->dev, irq,
2933 return ret; 2935 pl330_irq_handler, 0,
2936 dev_name(&adev->dev), pi);
2937 if (ret)
2938 return ret;
2939 } else {
2940 break;
2941 }
2942 }
2934 2943
2935 pi->pcfg.periph_id = adev->periphid; 2944 pi->pcfg.periph_id = adev->periphid;
2936 ret = pl330_add(pi); 2945 ret = pl330_add(pi);
2937 if (ret) 2946 if (ret)
2938 goto probe_err1; 2947 return ret;
2939 2948
2940 INIT_LIST_HEAD(&pdmac->desc_pool); 2949 INIT_LIST_HEAD(&pdmac->desc_pool);
2941 spin_lock_init(&pdmac->pool_lock); 2950 spin_lock_init(&pdmac->pool_lock);
@@ -3033,8 +3042,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
3033 3042
3034 return 0; 3043 return 0;
3035probe_err3: 3044probe_err3:
3036 amba_set_drvdata(adev, NULL);
3037
3038 /* Idle the DMAC */ 3045 /* Idle the DMAC */
3039 list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, 3046 list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
3040 chan.device_node) { 3047 chan.device_node) {
@@ -3048,8 +3055,6 @@ probe_err3:
3048 } 3055 }
3049probe_err2: 3056probe_err2:
3050 pl330_del(pi); 3057 pl330_del(pi);
3051probe_err1:
3052 free_irq(irq, pi);
3053 3058
3054 return ret; 3059 return ret;
3055} 3060}
@@ -3059,7 +3064,6 @@ static int pl330_remove(struct amba_device *adev)
3059 struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev); 3064 struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
3060 struct dma_pl330_chan *pch, *_p; 3065 struct dma_pl330_chan *pch, *_p;
3061 struct pl330_info *pi; 3066 struct pl330_info *pi;
3062 int irq;
3063 3067
3064 if (!pdmac) 3068 if (!pdmac)
3065 return 0; 3069 return 0;
@@ -3068,7 +3072,6 @@ static int pl330_remove(struct amba_device *adev)
3068 of_dma_controller_free(adev->dev.of_node); 3072 of_dma_controller_free(adev->dev.of_node);
3069 3073
3070 dma_async_device_unregister(&pdmac->ddma); 3074 dma_async_device_unregister(&pdmac->ddma);
3071 amba_set_drvdata(adev, NULL);
3072 3075
3073 /* Idle the DMAC */ 3076 /* Idle the DMAC */
3074 list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, 3077 list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
@@ -3086,9 +3089,6 @@ static int pl330_remove(struct amba_device *adev)
3086 3089
3087 pl330_del(pi); 3090 pl330_del(pi);
3088 3091
3089 irq = adev->irq[0];
3090 free_irq(irq, pi);
3091
3092 return 0; 3092 return 0;
3093} 3093}
3094 3094
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index e24b5ef486b5..8da48c6b2a38 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -804,218 +804,6 @@ static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan,
804} 804}
805 805
806/** 806/**
807 * ppc440spe_desc_get_src_addr - extract the source address from the descriptor
808 */
809static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc,
810 struct ppc440spe_adma_chan *chan, int src_idx)
811{
812 struct dma_cdb *dma_hw_desc;
813 struct xor_cb *xor_hw_desc;
814
815 switch (chan->device->id) {
816 case PPC440SPE_DMA0_ID:
817 case PPC440SPE_DMA1_ID:
818 dma_hw_desc = desc->hw_desc;
819 /* May have 0, 1, 2, or 3 sources */
820 switch (dma_hw_desc->opc) {
821 case DMA_CDB_OPC_NO_OP:
822 case DMA_CDB_OPC_DFILL128:
823 return 0;
824 case DMA_CDB_OPC_DCHECK128:
825 if (unlikely(src_idx)) {
826 printk(KERN_ERR "%s: try to get %d source for"
827 " DCHECK128\n", __func__, src_idx);
828 BUG();
829 }
830 return le32_to_cpu(dma_hw_desc->sg1l);
831 case DMA_CDB_OPC_MULTICAST:
832 case DMA_CDB_OPC_MV_SG1_SG2:
833 if (unlikely(src_idx > 2)) {
834 printk(KERN_ERR "%s: try to get %d source from"
835 " DMA descr\n", __func__, src_idx);
836 BUG();
837 }
838 if (src_idx) {
839 if (le32_to_cpu(dma_hw_desc->sg1u) &
840 DMA_CUED_XOR_WIN_MSK) {
841 u8 region;
842
843 if (src_idx == 1)
844 return le32_to_cpu(
845 dma_hw_desc->sg1l) +
846 desc->unmap_len;
847
848 region = (le32_to_cpu(
849 dma_hw_desc->sg1u)) >>
850 DMA_CUED_REGION_OFF;
851
852 region &= DMA_CUED_REGION_MSK;
853 switch (region) {
854 case DMA_RXOR123:
855 return le32_to_cpu(
856 dma_hw_desc->sg1l) +
857 (desc->unmap_len << 1);
858 case DMA_RXOR124:
859 return le32_to_cpu(
860 dma_hw_desc->sg1l) +
861 (desc->unmap_len * 3);
862 case DMA_RXOR125:
863 return le32_to_cpu(
864 dma_hw_desc->sg1l) +
865 (desc->unmap_len << 2);
866 default:
867 printk(KERN_ERR
868 "%s: try to"
869 " get src3 for region %02x"
870 "PPC440SPE_DESC_RXOR12?\n",
871 __func__, region);
872 BUG();
873 }
874 } else {
875 printk(KERN_ERR
876 "%s: try to get %d"
877 " source for non-cued descr\n",
878 __func__, src_idx);
879 BUG();
880 }
881 }
882 return le32_to_cpu(dma_hw_desc->sg1l);
883 default:
884 printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
885 __func__, dma_hw_desc->opc);
886 BUG();
887 }
888 return le32_to_cpu(dma_hw_desc->sg1l);
889 case PPC440SPE_XOR_ID:
890 /* May have up to 16 sources */
891 xor_hw_desc = desc->hw_desc;
892 return xor_hw_desc->ops[src_idx].l;
893 }
894 return 0;
895}
896
897/**
898 * ppc440spe_desc_get_dest_addr - extract the destination address from the
899 * descriptor
900 */
901static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc,
902 struct ppc440spe_adma_chan *chan, int idx)
903{
904 struct dma_cdb *dma_hw_desc;
905 struct xor_cb *xor_hw_desc;
906
907 switch (chan->device->id) {
908 case PPC440SPE_DMA0_ID:
909 case PPC440SPE_DMA1_ID:
910 dma_hw_desc = desc->hw_desc;
911
912 if (likely(!idx))
913 return le32_to_cpu(dma_hw_desc->sg2l);
914 return le32_to_cpu(dma_hw_desc->sg3l);
915 case PPC440SPE_XOR_ID:
916 xor_hw_desc = desc->hw_desc;
917 return xor_hw_desc->cbtal;
918 }
919 return 0;
920}
921
922/**
923 * ppc440spe_desc_get_src_num - extract the number of source addresses from
924 * the descriptor
925 */
926static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc,
927 struct ppc440spe_adma_chan *chan)
928{
929 struct dma_cdb *dma_hw_desc;
930 struct xor_cb *xor_hw_desc;
931
932 switch (chan->device->id) {
933 case PPC440SPE_DMA0_ID:
934 case PPC440SPE_DMA1_ID:
935 dma_hw_desc = desc->hw_desc;
936
937 switch (dma_hw_desc->opc) {
938 case DMA_CDB_OPC_NO_OP:
939 case DMA_CDB_OPC_DFILL128:
940 return 0;
941 case DMA_CDB_OPC_DCHECK128:
942 return 1;
943 case DMA_CDB_OPC_MV_SG1_SG2:
944 case DMA_CDB_OPC_MULTICAST:
945 /*
946 * Only for RXOR operations we have more than
947 * one source
948 */
949 if (le32_to_cpu(dma_hw_desc->sg1u) &
950 DMA_CUED_XOR_WIN_MSK) {
951 /* RXOR op, there are 2 or 3 sources */
952 if (((le32_to_cpu(dma_hw_desc->sg1u) >>
953 DMA_CUED_REGION_OFF) &
954 DMA_CUED_REGION_MSK) == DMA_RXOR12) {
955 /* RXOR 1-2 */
956 return 2;
957 } else {
958 /* RXOR 1-2-3/1-2-4/1-2-5 */
959 return 3;
960 }
961 }
962 return 1;
963 default:
964 printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
965 __func__, dma_hw_desc->opc);
966 BUG();
967 }
968 case PPC440SPE_XOR_ID:
969 /* up to 16 sources */
970 xor_hw_desc = desc->hw_desc;
971 return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK;
972 default:
973 BUG();
974 }
975 return 0;
976}
977
978/**
979 * ppc440spe_desc_get_dst_num - get the number of destination addresses in
980 * this descriptor
981 */
982static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc,
983 struct ppc440spe_adma_chan *chan)
984{
985 struct dma_cdb *dma_hw_desc;
986
987 switch (chan->device->id) {
988 case PPC440SPE_DMA0_ID:
989 case PPC440SPE_DMA1_ID:
990 /* May be 1 or 2 destinations */
991 dma_hw_desc = desc->hw_desc;
992 switch (dma_hw_desc->opc) {
993 case DMA_CDB_OPC_NO_OP:
994 case DMA_CDB_OPC_DCHECK128:
995 return 0;
996 case DMA_CDB_OPC_MV_SG1_SG2:
997 case DMA_CDB_OPC_DFILL128:
998 return 1;
999 case DMA_CDB_OPC_MULTICAST:
1000 if (desc->dst_cnt == 2)
1001 return 2;
1002 else
1003 return 1;
1004 default:
1005 printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
1006 __func__, dma_hw_desc->opc);
1007 BUG();
1008 }
1009 case PPC440SPE_XOR_ID:
1010 /* Always only 1 destination */
1011 return 1;
1012 default:
1013 BUG();
1014 }
1015 return 0;
1016}
1017
1018/**
1019 * ppc440spe_desc_get_link - get the address of the descriptor that 807 * ppc440spe_desc_get_link - get the address of the descriptor that
1020 * follows this one 808 * follows this one
1021 */ 809 */
@@ -1707,43 +1495,6 @@ static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot,
1707 } 1495 }
1708} 1496}
1709 1497
1710static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan,
1711 struct ppc440spe_adma_desc_slot *desc)
1712{
1713 u32 src_cnt, dst_cnt;
1714 dma_addr_t addr;
1715
1716 /*
1717 * get the number of sources & destination
1718 * included in this descriptor and unmap
1719 * them all
1720 */
1721 src_cnt = ppc440spe_desc_get_src_num(desc, chan);
1722 dst_cnt = ppc440spe_desc_get_dst_num(desc, chan);
1723
1724 /* unmap destinations */
1725 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
1726 while (dst_cnt--) {
1727 addr = ppc440spe_desc_get_dest_addr(
1728 desc, chan, dst_cnt);
1729 dma_unmap_page(chan->device->dev,
1730 addr, desc->unmap_len,
1731 DMA_FROM_DEVICE);
1732 }
1733 }
1734
1735 /* unmap sources */
1736 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
1737 while (src_cnt--) {
1738 addr = ppc440spe_desc_get_src_addr(
1739 desc, chan, src_cnt);
1740 dma_unmap_page(chan->device->dev,
1741 addr, desc->unmap_len,
1742 DMA_TO_DEVICE);
1743 }
1744 }
1745}
1746
1747/** 1498/**
1748 * ppc440spe_adma_run_tx_complete_actions - call functions to be called 1499 * ppc440spe_adma_run_tx_complete_actions - call functions to be called
1749 * upon completion 1500 * upon completion
@@ -1767,26 +1518,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
1767 desc->async_tx.callback( 1518 desc->async_tx.callback(
1768 desc->async_tx.callback_param); 1519 desc->async_tx.callback_param);
1769 1520
1770 /* unmap dma addresses 1521 dma_descriptor_unmap(&desc->async_tx);
1771 * (unmap_single vs unmap_page?)
1772 *
1773 * actually, ppc's dma_unmap_page() functions are empty, so
1774 * the following code is just for the sake of completeness
1775 */
1776 if (chan && chan->needs_unmap && desc->group_head &&
1777 desc->unmap_len) {
1778 struct ppc440spe_adma_desc_slot *unmap =
1779 desc->group_head;
1780 /* assume 1 slot per op always */
1781 u32 slot_count = unmap->slot_cnt;
1782
1783 /* Run through the group list and unmap addresses */
1784 for (i = 0; i < slot_count; i++) {
1785 BUG_ON(!unmap);
1786 ppc440spe_adma_unmap(chan, unmap);
1787 unmap = unmap->hw_next;
1788 }
1789 }
1790 } 1522 }
1791 1523
1792 /* run dependent operations */ 1524 /* run dependent operations */
@@ -3893,7 +3625,7 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
3893 3625
3894 ppc440spe_chan = to_ppc440spe_adma_chan(chan); 3626 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3895 ret = dma_cookie_status(chan, cookie, txstate); 3627 ret = dma_cookie_status(chan, cookie, txstate);
3896 if (ret == DMA_SUCCESS) 3628 if (ret == DMA_COMPLETE)
3897 return ret; 3629 return ret;
3898 3630
3899 ppc440spe_adma_slot_cleanup(ppc440spe_chan); 3631 ppc440spe_adma_slot_cleanup(ppc440spe_chan);
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 4cb127978636..4eddedb6eb7d 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -628,42 +628,13 @@ retry:
628 s3cchan->state = S3C24XX_DMA_CHAN_IDLE; 628 s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
629} 629}
630 630
631static void s3c24xx_dma_unmap_buffers(struct s3c24xx_txd *txd)
632{
633 struct device *dev = txd->vd.tx.chan->device->dev;
634 struct s3c24xx_sg *dsg;
635
636 if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
637 if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
638 list_for_each_entry(dsg, &txd->dsg_list, node)
639 dma_unmap_single(dev, dsg->src_addr, dsg->len,
640 DMA_TO_DEVICE);
641 else {
642 list_for_each_entry(dsg, &txd->dsg_list, node)
643 dma_unmap_page(dev, dsg->src_addr, dsg->len,
644 DMA_TO_DEVICE);
645 }
646 }
647
648 if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
649 if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
650 list_for_each_entry(dsg, &txd->dsg_list, node)
651 dma_unmap_single(dev, dsg->dst_addr, dsg->len,
652 DMA_FROM_DEVICE);
653 else
654 list_for_each_entry(dsg, &txd->dsg_list, node)
655 dma_unmap_page(dev, dsg->dst_addr, dsg->len,
656 DMA_FROM_DEVICE);
657 }
658}
659
660static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd) 631static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
661{ 632{
662 struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx); 633 struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
663 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan); 634 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);
664 635
665 if (!s3cchan->slave) 636 if (!s3cchan->slave)
666 s3c24xx_dma_unmap_buffers(txd); 637 dma_descriptor_unmap(&vd->tx);
667 638
668 s3c24xx_dma_free_txd(txd); 639 s3c24xx_dma_free_txd(txd);
669} 640}
@@ -795,7 +766,7 @@ static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
795 766
796 spin_lock_irqsave(&s3cchan->vc.lock, flags); 767 spin_lock_irqsave(&s3cchan->vc.lock, flags);
797 ret = dma_cookie_status(chan, cookie, txstate); 768 ret = dma_cookie_status(chan, cookie, txstate);
798 if (ret == DMA_SUCCESS) { 769 if (ret == DMA_COMPLETE) {
799 spin_unlock_irqrestore(&s3cchan->vc.lock, flags); 770 spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
800 return ret; 771 return ret;
801 } 772 }
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 461a91ab70bb..ab26d46bbe15 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -436,7 +436,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
436 enum dma_status ret; 436 enum dma_status ret;
437 437
438 ret = dma_cookie_status(&c->vc.chan, cookie, state); 438 ret = dma_cookie_status(&c->vc.chan, cookie, state);
439 if (ret == DMA_SUCCESS) 439 if (ret == DMA_COMPLETE)
440 return ret; 440 return ret;
441 441
442 if (!state) 442 if (!state)
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
index ebad84591a6e..3083d901a414 100644
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -60,6 +60,7 @@
60#define HPB_DMAE_DSTPR_DMSTP BIT(0) 60#define HPB_DMAE_DSTPR_DMSTP BIT(0)
61 61
62/* DMA status register (DSTSR) bits */ 62/* DMA status register (DSTSR) bits */
63#define HPB_DMAE_DSTSR_DQSTS BIT(2)
63#define HPB_DMAE_DSTSR_DMSTS BIT(0) 64#define HPB_DMAE_DSTSR_DMSTS BIT(0)
64 65
65/* DMA common registers */ 66/* DMA common registers */
@@ -286,6 +287,9 @@ static void hpb_dmae_halt(struct shdma_chan *schan)
286 287
287 ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR); 288 ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
288 ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR); 289 ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);
290
291 chan->plane_idx = 0;
292 chan->first_desc = true;
289} 293}
290 294
291static const struct hpb_dmae_slave_config * 295static const struct hpb_dmae_slave_config *
@@ -385,7 +389,10 @@ static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
385 struct hpb_dmae_chan *chan = to_chan(schan); 389 struct hpb_dmae_chan *chan = to_chan(schan);
386 u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR); 390 u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);
387 391
388 return (dstsr & HPB_DMAE_DSTSR_DMSTS) == HPB_DMAE_DSTSR_DMSTS; 392 if (chan->xfer_mode == XFER_DOUBLE)
393 return dstsr & HPB_DMAE_DSTSR_DQSTS;
394 else
395 return dstsr & HPB_DMAE_DSTSR_DMSTS;
389} 396}
390 397
391static int 398static int
@@ -510,6 +517,8 @@ static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
510 } 517 }
511 518
512 schan = &new_hpb_chan->shdma_chan; 519 schan = &new_hpb_chan->shdma_chan;
520 schan->max_xfer_len = HPB_DMA_TCR_MAX;
521
513 shdma_chan_probe(sdev, schan, id); 522 shdma_chan_probe(sdev, schan, id);
514 523
515 if (pdev->id >= 0) 524 if (pdev->id >= 0)
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index d94ab592cc1b..2e7b394def80 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -724,7 +724,7 @@ static enum dma_status shdma_tx_status(struct dma_chan *chan,
724 * If we don't find cookie on the queue, it has been aborted and we have 724 * If we don't find cookie on the queue, it has been aborted and we have
725 * to report error 725 * to report error
726 */ 726 */
727 if (status != DMA_SUCCESS) { 727 if (status != DMA_COMPLETE) {
728 struct shdma_desc *sdesc; 728 struct shdma_desc *sdesc;
729 status = DMA_ERROR; 729 status = DMA_ERROR;
730 list_for_each_entry(sdesc, &schan->ld_queue, node) 730 list_for_each_entry(sdesc, &schan->ld_queue, node)
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 1069e8869f20..0d765c0e21ec 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -685,7 +685,7 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
685static int sh_dmae_probe(struct platform_device *pdev) 685static int sh_dmae_probe(struct platform_device *pdev)
686{ 686{
687 const struct sh_dmae_pdata *pdata; 687 const struct sh_dmae_pdata *pdata;
688 unsigned long irqflags = IRQF_DISABLED, 688 unsigned long irqflags = 0,
689 chan_flag[SH_DMAE_MAX_CHANNELS] = {}; 689 chan_flag[SH_DMAE_MAX_CHANNELS] = {};
690 int errirq, chan_irq[SH_DMAE_MAX_CHANNELS]; 690 int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
691 int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; 691 int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
@@ -838,7 +838,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
838 IORESOURCE_IRQ_SHAREABLE) 838 IORESOURCE_IRQ_SHAREABLE)
839 chan_flag[irq_cnt] = IRQF_SHARED; 839 chan_flag[irq_cnt] = IRQF_SHARED;
840 else 840 else
841 chan_flag[irq_cnt] = IRQF_DISABLED; 841 chan_flag[irq_cnt] = 0;
842 dev_dbg(&pdev->dev, 842 dev_dbg(&pdev->dev,
843 "Found IRQ %d for channel %d\n", 843 "Found IRQ %d for channel %d\n",
844 i, irq_cnt); 844 i, irq_cnt);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 82d2b97ad942..b8c031b7de4e 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -14,6 +14,7 @@
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/clk.h> 15#include <linux/clk.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/log2.h>
17#include <linux/pm.h> 18#include <linux/pm.h>
18#include <linux/pm_runtime.h> 19#include <linux/pm_runtime.h>
19#include <linux/err.h> 20#include <linux/err.h>
@@ -2626,7 +2627,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
2626 } 2627 }
2627 2628
2628 ret = dma_cookie_status(chan, cookie, txstate); 2629 ret = dma_cookie_status(chan, cookie, txstate);
2629 if (ret != DMA_SUCCESS) 2630 if (ret != DMA_COMPLETE)
2630 dma_set_residue(txstate, stedma40_residue(chan)); 2631 dma_set_residue(txstate, stedma40_residue(chan));
2631 2632
2632 if (d40_is_paused(d40c)) 2633 if (d40_is_paused(d40c))
@@ -2796,8 +2797,8 @@ static int d40_set_runtime_config(struct dma_chan *chan,
2796 src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || 2797 src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
2797 dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || 2798 dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2798 dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || 2799 dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
2799 ((src_addr_width > 1) && (src_addr_width & 1)) || 2800 !is_power_of_2(src_addr_width) ||
2800 ((dst_addr_width > 1) && (dst_addr_width & 1))) 2801 !is_power_of_2(dst_addr_width))
2801 return -EINVAL; 2802 return -EINVAL;
2802 2803
2803 cfg->src_info.data_width = src_addr_width; 2804 cfg->src_info.data_width = src_addr_width;
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 5d4986e5f5fa..73654e33f13b 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -570,7 +570,7 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc,
570 570
571 list_del(&sgreq->node); 571 list_del(&sgreq->node);
572 if (sgreq->last_sg) { 572 if (sgreq->last_sg) {
573 dma_desc->dma_status = DMA_SUCCESS; 573 dma_desc->dma_status = DMA_COMPLETE;
574 dma_cookie_complete(&dma_desc->txd); 574 dma_cookie_complete(&dma_desc->txd);
575 if (!dma_desc->cb_count) 575 if (!dma_desc->cb_count)
576 list_add_tail(&dma_desc->cb_node, &tdc->cb_desc); 576 list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
@@ -768,7 +768,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
768 unsigned int residual; 768 unsigned int residual;
769 769
770 ret = dma_cookie_status(dc, cookie, txstate); 770 ret = dma_cookie_status(dc, cookie, txstate);
771 if (ret == DMA_SUCCESS) 771 if (ret == DMA_COMPLETE)
772 return ret; 772 return ret;
773 773
774 spin_lock_irqsave(&tdc->lock, flags); 774 spin_lock_irqsave(&tdc->lock, flags);
@@ -1018,7 +1018,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
1018 return &dma_desc->txd; 1018 return &dma_desc->txd;
1019} 1019}
1020 1020
1021struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( 1021static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
1022 struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, 1022 struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
1023 size_t period_len, enum dma_transfer_direction direction, 1023 size_t period_len, enum dma_transfer_direction direction,
1024 unsigned long flags, void *context) 1024 unsigned long flags, void *context)
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 28af214fce04..4506a7b4f972 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -154,38 +154,6 @@ static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
154 return done; 154 return done;
155} 155}
156 156
157static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
158 bool single)
159{
160 dma_addr_t addr;
161 int len;
162
163 addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
164 dma_desc[4];
165
166 len = (dma_desc[3] << 8) | dma_desc[2];
167
168 if (single)
169 dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
170 DMA_TO_DEVICE);
171 else
172 dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
173 DMA_TO_DEVICE);
174}
175
176static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
177{
178 struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
179 struct timb_dma_chan, chan);
180 u8 *descs;
181
182 for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
183 __td_unmap_desc(td_chan, descs, single);
184 if (descs[0] & 0x02)
185 break;
186 }
187}
188
189static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc, 157static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
190 struct scatterlist *sg, bool last) 158 struct scatterlist *sg, bool last)
191{ 159{
@@ -293,10 +261,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
293 261
294 list_move(&td_desc->desc_node, &td_chan->free_list); 262 list_move(&td_desc->desc_node, &td_chan->free_list);
295 263
296 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) 264 dma_descriptor_unmap(txd);
297 __td_unmap_descs(td_desc,
298 txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);
299
300 /* 265 /*
301 * The API requires that no submissions are done from a 266 * The API requires that no submissions are done from a
302 * callback, so we don't need to drop the lock here 267 * callback, so we don't need to drop the lock here
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 71e8e775189e..bae6c29f5502 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -419,30 +419,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
419 list_splice_init(&desc->tx_list, &dc->free_list); 419 list_splice_init(&desc->tx_list, &dc->free_list);
420 list_move(&desc->desc_node, &dc->free_list); 420 list_move(&desc->desc_node, &dc->free_list);
421 421
422 if (!ds) { 422 dma_descriptor_unmap(txd);
423 dma_addr_t dmaaddr;
424 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
425 dmaaddr = is_dmac64(dc) ?
426 desc->hwdesc.DAR : desc->hwdesc32.DAR;
427 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
428 dma_unmap_single(chan2parent(&dc->chan),
429 dmaaddr, desc->len, DMA_FROM_DEVICE);
430 else
431 dma_unmap_page(chan2parent(&dc->chan),
432 dmaaddr, desc->len, DMA_FROM_DEVICE);
433 }
434 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
435 dmaaddr = is_dmac64(dc) ?
436 desc->hwdesc.SAR : desc->hwdesc32.SAR;
437 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
438 dma_unmap_single(chan2parent(&dc->chan),
439 dmaaddr, desc->len, DMA_TO_DEVICE);
440 else
441 dma_unmap_page(chan2parent(&dc->chan),
442 dmaaddr, desc->len, DMA_TO_DEVICE);
443 }
444 }
445
446 /* 423 /*
447 * The API requires that no submissions are done from a 424 * The API requires that no submissions are done from a
448 * callback, so we don't need to drop the lock here 425 * callback, so we don't need to drop the lock here
@@ -962,8 +939,8 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
962 enum dma_status ret; 939 enum dma_status ret;
963 940
964 ret = dma_cookie_status(chan, cookie, txstate); 941 ret = dma_cookie_status(chan, cookie, txstate);
965 if (ret == DMA_SUCCESS) 942 if (ret == DMA_COMPLETE)
966 return DMA_SUCCESS; 943 return DMA_COMPLETE;
967 944
968 spin_lock_bh(&dc->lock); 945 spin_lock_bh(&dc->lock);
969 txx9dmac_scan_descriptors(dc); 946 txx9dmac_scan_descriptors(dc);
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 8472405c5586..d7f1b57bd3be 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -945,7 +945,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
945 u32 tad_offset; 945 u32 tad_offset;
946 u32 rir_way; 946 u32 rir_way;
947 u32 mb, kb; 947 u32 mb, kb;
948 u64 ch_addr, offset, limit, prv = 0; 948 u64 ch_addr, offset, limit = 0, prv = 0;
949 949
950 950
951 /* 951 /*
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 3c55ec856e39..a287cece0593 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -1082,7 +1082,7 @@ static void arizona_micd_set_level(struct arizona *arizona, int index,
1082static int arizona_extcon_probe(struct platform_device *pdev) 1082static int arizona_extcon_probe(struct platform_device *pdev)
1083{ 1083{
1084 struct arizona *arizona = dev_get_drvdata(pdev->dev.parent); 1084 struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
1085 struct arizona_pdata *pdata; 1085 struct arizona_pdata *pdata = &arizona->pdata;
1086 struct arizona_extcon_info *info; 1086 struct arizona_extcon_info *info;
1087 unsigned int val; 1087 unsigned int val;
1088 int jack_irq_fall, jack_irq_rise; 1088 int jack_irq_fall, jack_irq_rise;
@@ -1091,8 +1091,6 @@ static int arizona_extcon_probe(struct platform_device *pdev)
1091 if (!arizona->dapm || !arizona->dapm->card) 1091 if (!arizona->dapm || !arizona->dapm->card)
1092 return -EPROBE_DEFER; 1092 return -EPROBE_DEFER;
1093 1093
1094 pdata = dev_get_platdata(arizona->dev);
1095
1096 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); 1094 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
1097 if (!info) { 1095 if (!info) {
1098 dev_err(&pdev->dev, "Failed to allocate memory\n"); 1096 dev_err(&pdev->dev, "Failed to allocate memory\n");
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c
index 15443d3b6be1..76322330cbd7 100644
--- a/drivers/extcon/extcon-class.c
+++ b/drivers/extcon/extcon-class.c
@@ -792,6 +792,8 @@ void extcon_dev_unregister(struct extcon_dev *edev)
792 return; 792 return;
793 } 793 }
794 794
795 device_unregister(&edev->dev);
796
795 if (edev->mutually_exclusive && edev->max_supported) { 797 if (edev->mutually_exclusive && edev->max_supported) {
796 for (index = 0; edev->mutually_exclusive[index]; 798 for (index = 0; edev->mutually_exclusive[index];
797 index++) 799 index++)
@@ -812,7 +814,6 @@ void extcon_dev_unregister(struct extcon_dev *edev)
812 if (switch_class) 814 if (switch_class)
813 class_compat_remove_link(switch_class, &edev->dev, NULL); 815 class_compat_remove_link(switch_class, &edev->dev, NULL);
814#endif 816#endif
815 device_unregister(&edev->dev);
816 put_device(&edev->dev); 817 put_device(&edev->dev);
817} 818}
818EXPORT_SYMBOL_GPL(extcon_dev_unregister); 819EXPORT_SYMBOL_GPL(extcon_dev_unregister);
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 281029daf98c..b0bb056458a3 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1623,6 +1623,7 @@ static struct scsi_host_template scsi_driver_template = {
1623 .cmd_per_lun = 1, 1623 .cmd_per_lun = 1,
1624 .can_queue = 1, 1624 .can_queue = 1,
1625 .sdev_attrs = sbp2_scsi_sysfs_attrs, 1625 .sdev_attrs = sbp2_scsi_sysfs_attrs,
1626 .no_write_same = 1,
1626}; 1627};
1627 1628
1628MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); 1629MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 5002d50e3781..743fd426f21b 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -18,14 +18,12 @@ module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
18 18
19static int efi_pstore_open(struct pstore_info *psi) 19static int efi_pstore_open(struct pstore_info *psi)
20{ 20{
21 efivar_entry_iter_begin();
22 psi->data = NULL; 21 psi->data = NULL;
23 return 0; 22 return 0;
24} 23}
25 24
26static int efi_pstore_close(struct pstore_info *psi) 25static int efi_pstore_close(struct pstore_info *psi)
27{ 26{
28 efivar_entry_iter_end();
29 psi->data = NULL; 27 psi->data = NULL;
30 return 0; 28 return 0;
31} 29}
@@ -39,6 +37,12 @@ struct pstore_read_data {
39 char **buf; 37 char **buf;
40}; 38};
41 39
40static inline u64 generic_id(unsigned long timestamp,
41 unsigned int part, int count)
42{
43 return (timestamp * 100 + part) * 1000 + count;
44}
45
42static int efi_pstore_read_func(struct efivar_entry *entry, void *data) 46static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
43{ 47{
44 efi_guid_t vendor = LINUX_EFI_CRASH_GUID; 48 efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
@@ -57,7 +61,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
57 61
58 if (sscanf(name, "dump-type%u-%u-%d-%lu-%c", 62 if (sscanf(name, "dump-type%u-%u-%d-%lu-%c",
59 cb_data->type, &part, &cnt, &time, &data_type) == 5) { 63 cb_data->type, &part, &cnt, &time, &data_type) == 5) {
60 *cb_data->id = part; 64 *cb_data->id = generic_id(time, part, cnt);
61 *cb_data->count = cnt; 65 *cb_data->count = cnt;
62 cb_data->timespec->tv_sec = time; 66 cb_data->timespec->tv_sec = time;
63 cb_data->timespec->tv_nsec = 0; 67 cb_data->timespec->tv_nsec = 0;
@@ -67,7 +71,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
67 *cb_data->compressed = false; 71 *cb_data->compressed = false;
68 } else if (sscanf(name, "dump-type%u-%u-%d-%lu", 72 } else if (sscanf(name, "dump-type%u-%u-%d-%lu",
69 cb_data->type, &part, &cnt, &time) == 4) { 73 cb_data->type, &part, &cnt, &time) == 4) {
70 *cb_data->id = part; 74 *cb_data->id = generic_id(time, part, cnt);
71 *cb_data->count = cnt; 75 *cb_data->count = cnt;
72 cb_data->timespec->tv_sec = time; 76 cb_data->timespec->tv_sec = time;
73 cb_data->timespec->tv_nsec = 0; 77 cb_data->timespec->tv_nsec = 0;
@@ -79,7 +83,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
79 * which doesn't support holding 83 * which doesn't support holding
80 * multiple logs, remains. 84 * multiple logs, remains.
81 */ 85 */
82 *cb_data->id = part; 86 *cb_data->id = generic_id(time, part, 0);
83 *cb_data->count = 0; 87 *cb_data->count = 0;
84 cb_data->timespec->tv_sec = time; 88 cb_data->timespec->tv_sec = time;
85 cb_data->timespec->tv_nsec = 0; 89 cb_data->timespec->tv_nsec = 0;
@@ -91,19 +95,125 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
91 __efivar_entry_get(entry, &entry->var.Attributes, 95 __efivar_entry_get(entry, &entry->var.Attributes,
92 &entry->var.DataSize, entry->var.Data); 96 &entry->var.DataSize, entry->var.Data);
93 size = entry->var.DataSize; 97 size = entry->var.DataSize;
98 memcpy(*cb_data->buf, entry->var.Data,
99 (size_t)min_t(unsigned long, EFIVARS_DATA_SIZE_MAX, size));
94 100
95 *cb_data->buf = kmemdup(entry->var.Data, size, GFP_KERNEL);
96 if (*cb_data->buf == NULL)
97 return -ENOMEM;
98 return size; 101 return size;
99} 102}
100 103
104/**
105 * efi_pstore_scan_sysfs_enter
106 * @entry: scanning entry
107 * @next: next entry
108 * @head: list head
109 */
110static void efi_pstore_scan_sysfs_enter(struct efivar_entry *pos,
111 struct efivar_entry *next,
112 struct list_head *head)
113{
114 pos->scanning = true;
115 if (&next->list != head)
116 next->scanning = true;
117}
118
119/**
120 * __efi_pstore_scan_sysfs_exit
121 * @entry: deleting entry
122 * @turn_off_scanning: Check if a scanning flag should be turned off
123 */
124static inline void __efi_pstore_scan_sysfs_exit(struct efivar_entry *entry,
125 bool turn_off_scanning)
126{
127 if (entry->deleting) {
128 list_del(&entry->list);
129 efivar_entry_iter_end();
130 efivar_unregister(entry);
131 efivar_entry_iter_begin();
132 } else if (turn_off_scanning)
133 entry->scanning = false;
134}
135
136/**
137 * efi_pstore_scan_sysfs_exit
138 * @pos: scanning entry
139 * @next: next entry
140 * @head: list head
141 * @stop: a flag checking if scanning will stop
142 */
143static void efi_pstore_scan_sysfs_exit(struct efivar_entry *pos,
144 struct efivar_entry *next,
145 struct list_head *head, bool stop)
146{
147 __efi_pstore_scan_sysfs_exit(pos, true);
148 if (stop)
149 __efi_pstore_scan_sysfs_exit(next, &next->list != head);
150}
151
152/**
153 * efi_pstore_sysfs_entry_iter
154 *
155 * @data: function-specific data to pass to callback
156 * @pos: entry to begin iterating from
157 *
158 * You MUST call efivar_enter_iter_begin() before this function, and
159 * efivar_entry_iter_end() afterwards.
160 *
161 * It is possible to begin iteration from an arbitrary entry within
162 * the list by passing @pos. @pos is updated on return to point to
163 * the next entry of the last one passed to efi_pstore_read_func().
164 * To begin iterating from the beginning of the list @pos must be %NULL.
165 */
166static int efi_pstore_sysfs_entry_iter(void *data, struct efivar_entry **pos)
167{
168 struct efivar_entry *entry, *n;
169 struct list_head *head = &efivar_sysfs_list;
170 int size = 0;
171
172 if (!*pos) {
173 list_for_each_entry_safe(entry, n, head, list) {
174 efi_pstore_scan_sysfs_enter(entry, n, head);
175
176 size = efi_pstore_read_func(entry, data);
177 efi_pstore_scan_sysfs_exit(entry, n, head, size < 0);
178 if (size)
179 break;
180 }
181 *pos = n;
182 return size;
183 }
184
185 list_for_each_entry_safe_from((*pos), n, head, list) {
186 efi_pstore_scan_sysfs_enter((*pos), n, head);
187
188 size = efi_pstore_read_func((*pos), data);
189 efi_pstore_scan_sysfs_exit((*pos), n, head, size < 0);
190 if (size)
191 break;
192 }
193 *pos = n;
194 return size;
195}
196
197/**
198 * efi_pstore_read
199 *
200 * This function returns a size of NVRAM entry logged via efi_pstore_write().
201 * The meaning and behavior of efi_pstore/pstore are as below.
202 *
203 * size > 0: Got data of an entry logged via efi_pstore_write() successfully,
204 * and pstore filesystem will continue reading subsequent entries.
205 * size == 0: Entry was not logged via efi_pstore_write(),
206 * and efi_pstore driver will continue reading subsequent entries.
207 * size < 0: Failed to get data of entry logging via efi_pstore_write(),
208 * and pstore will stop reading entry.
209 */
101static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, 210static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
102 int *count, struct timespec *timespec, 211 int *count, struct timespec *timespec,
103 char **buf, bool *compressed, 212 char **buf, bool *compressed,
104 struct pstore_info *psi) 213 struct pstore_info *psi)
105{ 214{
106 struct pstore_read_data data; 215 struct pstore_read_data data;
216 ssize_t size;
107 217
108 data.id = id; 218 data.id = id;
109 data.type = type; 219 data.type = type;
@@ -112,8 +222,17 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
112 data.compressed = compressed; 222 data.compressed = compressed;
113 data.buf = buf; 223 data.buf = buf;
114 224
115 return __efivar_entry_iter(efi_pstore_read_func, &efivar_sysfs_list, &data, 225 *data.buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL);
116 (struct efivar_entry **)&psi->data); 226 if (!*data.buf)
227 return -ENOMEM;
228
229 efivar_entry_iter_begin();
230 size = efi_pstore_sysfs_entry_iter(&data,
231 (struct efivar_entry **)&psi->data);
232 efivar_entry_iter_end();
233 if (size <= 0)
234 kfree(*data.buf);
235 return size;
117} 236}
118 237
119static int efi_pstore_write(enum pstore_type_id type, 238static int efi_pstore_write(enum pstore_type_id type,
@@ -184,9 +303,17 @@ static int efi_pstore_erase_func(struct efivar_entry *entry, void *data)
184 return 0; 303 return 0;
185 } 304 }
186 305
306 if (entry->scanning) {
307 /*
308 * Skip deletion because this entry will be deleted
309 * after scanning is completed.
310 */
311 entry->deleting = true;
312 } else
313 list_del(&entry->list);
314
187 /* found */ 315 /* found */
188 __efivar_entry_delete(entry); 316 __efivar_entry_delete(entry);
189 list_del(&entry->list);
190 317
191 return 1; 318 return 1;
192} 319}
@@ -199,14 +326,16 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
199 char name[DUMP_NAME_LEN]; 326 char name[DUMP_NAME_LEN];
200 efi_char16_t efi_name[DUMP_NAME_LEN]; 327 efi_char16_t efi_name[DUMP_NAME_LEN];
201 int found, i; 328 int found, i;
329 unsigned int part;
202 330
203 sprintf(name, "dump-type%u-%u-%d-%lu", type, (unsigned int)id, count, 331 do_div(id, 1000);
204 time.tv_sec); 332 part = do_div(id, 100);
333 sprintf(name, "dump-type%u-%u-%d-%lu", type, part, count, time.tv_sec);
205 334
206 for (i = 0; i < DUMP_NAME_LEN; i++) 335 for (i = 0; i < DUMP_NAME_LEN; i++)
207 efi_name[i] = name[i]; 336 efi_name[i] = name[i];
208 337
209 edata.id = id; 338 edata.id = part;
210 edata.type = type; 339 edata.type = type;
211 edata.count = count; 340 edata.count = count;
212 edata.time = time; 341 edata.time = time;
@@ -214,10 +343,12 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
214 343
215 efivar_entry_iter_begin(); 344 efivar_entry_iter_begin();
216 found = __efivar_entry_iter(efi_pstore_erase_func, &efivar_sysfs_list, &edata, &entry); 345 found = __efivar_entry_iter(efi_pstore_erase_func, &efivar_sysfs_list, &edata, &entry);
217 efivar_entry_iter_end();
218 346
219 if (found) 347 if (found && !entry->scanning) {
348 efivar_entry_iter_end();
220 efivar_unregister(entry); 349 efivar_unregister(entry);
350 } else
351 efivar_entry_iter_end();
221 352
222 return 0; 353 return 0;
223} 354}
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 933eb027d527..3dc248239197 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -383,12 +383,16 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
383 else if (__efivar_entry_delete(entry)) 383 else if (__efivar_entry_delete(entry))
384 err = -EIO; 384 err = -EIO;
385 385
386 efivar_entry_iter_end(); 386 if (err) {
387 387 efivar_entry_iter_end();
388 if (err)
389 return err; 388 return err;
389 }
390 390
391 efivar_unregister(entry); 391 if (!entry->scanning) {
392 efivar_entry_iter_end();
393 efivar_unregister(entry);
394 } else
395 efivar_entry_iter_end();
392 396
393 /* It's dead Jim.... */ 397 /* It's dead Jim.... */
394 return count; 398 return count;
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 391c67b182d9..b22659cccca4 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -683,8 +683,16 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
683 if (!found) 683 if (!found)
684 return NULL; 684 return NULL;
685 685
686 if (remove) 686 if (remove) {
687 list_del(&entry->list); 687 if (entry->scanning) {
688 /*
689 * The entry will be deleted
690 * after scanning is completed.
691 */
692 entry->deleting = true;
693 } else
694 list_del(&entry->list);
695 }
688 696
689 return entry; 697 return entry;
690} 698}
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 72c927dc3be1..54c18c220a60 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -158,7 +158,7 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
158 spin_unlock_irqrestore(&kona_gpio->lock, flags); 158 spin_unlock_irqrestore(&kona_gpio->lock, flags);
159 159
160 /* return the specified bit status */ 160 /* return the specified bit status */
161 return !!(val & bit); 161 return !!(val & BIT(bit));
162} 162}
163 163
164static int bcm_kona_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) 164static int bcm_kona_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 8847adf392b7..84be70157ad6 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -327,7 +327,7 @@ static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset)
327 * NOTE: we assume for now that only irqs in the first gpio_chip 327 * NOTE: we assume for now that only irqs in the first gpio_chip
328 * can provide direct-mapped IRQs to AINTC (up to 32 GPIOs). 328 * can provide direct-mapped IRQs to AINTC (up to 32 GPIOs).
329 */ 329 */
330 if (offset < d->irq_base) 330 if (offset < d->gpio_unbanked)
331 return d->gpio_irq + offset; 331 return d->gpio_irq + offset;
332 else 332 else
333 return -ENODEV; 333 return -ENODEV;
@@ -419,6 +419,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
419 419
420 /* pass "bank 0" GPIO IRQs to AINTC */ 420 /* pass "bank 0" GPIO IRQs to AINTC */
421 chips[0].chip.to_irq = gpio_to_irq_unbanked; 421 chips[0].chip.to_irq = gpio_to_irq_unbanked;
422 chips[0].gpio_irq = bank_irq;
423 chips[0].gpio_unbanked = pdata->gpio_unbanked;
422 binten = BIT(0); 424 binten = BIT(0);
423 425
424 /* AINTC handles mask/unmask; GPIO handles triggering */ 426 /* AINTC handles mask/unmask; GPIO handles triggering */
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 914e859e3eda..d7d6d72eba33 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -70,10 +70,14 @@ static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio)
70 u32 val; 70 u32 val;
71 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); 71 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
72 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); 72 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
73 u32 out_mask, out_shadow;
73 74
74 val = in_be32(mm->regs + GPIO_DAT) & ~in_be32(mm->regs + GPIO_DIR); 75 out_mask = in_be32(mm->regs + GPIO_DIR);
75 76
76 return (val | mpc8xxx_gc->data) & mpc8xxx_gpio2mask(gpio); 77 val = in_be32(mm->regs + GPIO_DAT) & ~out_mask;
78 out_shadow = mpc8xxx_gc->data & out_mask;
79
80 return (val | out_shadow) & mpc8xxx_gpio2mask(gpio);
77} 81}
78 82
79static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio) 83static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio)
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index f7a0cc4da950..7b37300973db 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -102,7 +102,7 @@ struct msm_gpio_dev {
102 DECLARE_BITMAP(wake_irqs, MAX_NR_GPIO); 102 DECLARE_BITMAP(wake_irqs, MAX_NR_GPIO);
103 DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO); 103 DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
104 struct irq_domain *domain; 104 struct irq_domain *domain;
105 unsigned int summary_irq; 105 int summary_irq;
106 void __iomem *msm_tlmm_base; 106 void __iomem *msm_tlmm_base;
107}; 107};
108 108
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 3c3321f94053..db3129043e63 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -79,7 +79,7 @@ struct mvebu_gpio_chip {
79 spinlock_t lock; 79 spinlock_t lock;
80 void __iomem *membase; 80 void __iomem *membase;
81 void __iomem *percpu_membase; 81 void __iomem *percpu_membase;
82 unsigned int irqbase; 82 int irqbase;
83 struct irq_domain *domain; 83 struct irq_domain *domain;
84 int soc_variant; 84 int soc_variant;
85}; 85};
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index f22f7f3e2e53..b4d42112d02d 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -286,11 +286,6 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
286 if (!chip->base) 286 if (!chip->base)
287 return -ENOMEM; 287 return -ENOMEM;
288 288
289 chip->domain = irq_domain_add_simple(adev->dev.of_node, PL061_GPIO_NR,
290 irq_base, &pl061_domain_ops, chip);
291 if (!chip->domain)
292 return -ENODEV;
293
294 spin_lock_init(&chip->lock); 289 spin_lock_init(&chip->lock);
295 290
296 chip->gc.request = pl061_gpio_request; 291 chip->gc.request = pl061_gpio_request;
@@ -320,6 +315,11 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
320 irq_set_chained_handler(irq, pl061_irq_handler); 315 irq_set_chained_handler(irq, pl061_irq_handler);
321 irq_set_handler_data(irq, chip); 316 irq_set_handler_data(irq, chip);
322 317
318 chip->domain = irq_domain_add_simple(adev->dev.of_node, PL061_GPIO_NR,
319 irq_base, &pl061_domain_ops, chip);
320 if (!chip->domain)
321 return -ENODEV;
322
323 for (i = 0; i < PL061_GPIO_NR; i++) { 323 for (i = 0; i < PL061_GPIO_NR; i++) {
324 if (pdata) { 324 if (pdata) {
325 if (pdata->directions & (1 << i)) 325 if (pdata->directions & (1 << i))
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index d3f15ae93bd3..fe088a30567a 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -381,7 +381,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
381 if (!p->irq_domain) { 381 if (!p->irq_domain) {
382 ret = -ENXIO; 382 ret = -ENXIO;
383 dev_err(&pdev->dev, "cannot initialize irq domain\n"); 383 dev_err(&pdev->dev, "cannot initialize irq domain\n");
384 goto err1; 384 goto err0;
385 } 385 }
386 386
387 if (devm_request_irq(&pdev->dev, irq->start, 387 if (devm_request_irq(&pdev->dev, irq->start,
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 0502b9a041a5..da071ddbad99 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -132,6 +132,7 @@ static int tb10x_gpio_direction_out(struct gpio_chip *chip,
132 int mask = BIT(offset); 132 int mask = BIT(offset);
133 int val = TB10X_GPIO_DIR_OUT << offset; 133 int val = TB10X_GPIO_DIR_OUT << offset;
134 134
135 tb10x_gpio_set(chip, offset, value);
135 tb10x_set_bits(tb10x_gpio, OFFSET_TO_REG_DDR, mask, val); 136 tb10x_set_bits(tb10x_gpio, OFFSET_TO_REG_DDR, mask, val);
136 137
137 return 0; 138 return 0;
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index 0c7e891c8651..b97d6a6577b9 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -354,17 +354,18 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
354static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value) 354static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
355{ 355{
356 struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip); 356 struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
357 int ret = -EINVAL;
357 358
358 mutex_lock(&priv->mutex); 359 mutex_lock(&priv->mutex);
359 if (offset < TWL4030_GPIO_MAX) 360 if (offset < TWL4030_GPIO_MAX)
360 twl4030_set_gpio_dataout(offset, value); 361 ret = twl4030_set_gpio_direction(offset, 0);
361 362
362 priv->direction |= BIT(offset); 363 priv->direction |= BIT(offset);
363 mutex_unlock(&priv->mutex); 364 mutex_unlock(&priv->mutex);
364 365
365 twl_set(chip, offset, value); 366 twl_set(chip, offset, value);
366 367
367 return 0; 368 return ret;
368} 369}
369 370
370static int twl_to_irq(struct gpio_chip *chip, unsigned offset) 371static int twl_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -435,7 +436,8 @@ static int gpio_twl4030_debounce(u32 debounce, u8 mmc_cd)
435 436
436static int gpio_twl4030_remove(struct platform_device *pdev); 437static int gpio_twl4030_remove(struct platform_device *pdev);
437 438
438static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev) 439static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev,
440 struct twl4030_gpio_platform_data *pdata)
439{ 441{
440 struct twl4030_gpio_platform_data *omap_twl_info; 442 struct twl4030_gpio_platform_data *omap_twl_info;
441 443
@@ -443,6 +445,9 @@ static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev)
443 if (!omap_twl_info) 445 if (!omap_twl_info)
444 return NULL; 446 return NULL;
445 447
448 if (pdata)
449 *omap_twl_info = *pdata;
450
446 omap_twl_info->use_leds = of_property_read_bool(dev->of_node, 451 omap_twl_info->use_leds = of_property_read_bool(dev->of_node,
447 "ti,use-leds"); 452 "ti,use-leds");
448 453
@@ -500,7 +505,7 @@ no_irqs:
500 mutex_init(&priv->mutex); 505 mutex_init(&priv->mutex);
501 506
502 if (node) 507 if (node)
503 pdata = of_gpio_twl4030(&pdev->dev); 508 pdata = of_gpio_twl4030(&pdev->dev, pdata);
504 509
505 if (pdata == NULL) { 510 if (pdata == NULL) {
506 dev_err(&pdev->dev, "Platform data is missing\n"); 511 dev_err(&pdev->dev, "Platform data is missing\n");
diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c
index 1a605f2a0f55..06fb5cf99ded 100644
--- a/drivers/gpio/gpio-ucb1400.c
+++ b/drivers/gpio/gpio-ucb1400.c
@@ -105,3 +105,4 @@ module_platform_driver(ucb1400_gpio_driver);
105 105
106MODULE_DESCRIPTION("Philips UCB1400 GPIO driver"); 106MODULE_DESCRIPTION("Philips UCB1400 GPIO driver");
107MODULE_LICENSE("GPL"); 107MODULE_LICENSE("GPL");
108MODULE_ALIAS("platform:ucb1400_gpio");
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 7dd446150294..85f772c0b26a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -13,6 +13,8 @@
13#include <linux/acpi_gpio.h> 13#include <linux/acpi_gpio.h>
14#include <linux/idr.h> 14#include <linux/idr.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/acpi.h>
17#include <linux/gpio/driver.h>
16 18
17#define CREATE_TRACE_POINTS 19#define CREATE_TRACE_POINTS
18#include <trace/events/gpio.h> 20#include <trace/events/gpio.h>
@@ -1307,6 +1309,18 @@ struct gpio_chip *gpiochip_find(void *data,
1307} 1309}
1308EXPORT_SYMBOL_GPL(gpiochip_find); 1310EXPORT_SYMBOL_GPL(gpiochip_find);
1309 1311
1312static int gpiochip_match_name(struct gpio_chip *chip, void *data)
1313{
1314 const char *name = data;
1315
1316 return !strcmp(chip->label, name);
1317}
1318
1319static struct gpio_chip *find_chip_by_name(const char *name)
1320{
1321 return gpiochip_find((void *)name, gpiochip_match_name);
1322}
1323
1310#ifdef CONFIG_PINCTRL 1324#ifdef CONFIG_PINCTRL
1311 1325
1312/** 1326/**
@@ -1340,8 +1354,10 @@ int gpiochip_add_pingroup_range(struct gpio_chip *chip,
1340 ret = pinctrl_get_group_pins(pctldev, pin_group, 1354 ret = pinctrl_get_group_pins(pctldev, pin_group,
1341 &pin_range->range.pins, 1355 &pin_range->range.pins,
1342 &pin_range->range.npins); 1356 &pin_range->range.npins);
1343 if (ret < 0) 1357 if (ret < 0) {
1358 kfree(pin_range);
1344 return ret; 1359 return ret;
1360 }
1345 1361
1346 pinctrl_add_gpio_range(pctldev, &pin_range->range); 1362 pinctrl_add_gpio_range(pctldev, &pin_range->range);
1347 1363
@@ -2259,26 +2275,10 @@ void gpiod_add_table(struct gpiod_lookup *table, size_t size)
2259 mutex_unlock(&gpio_lookup_lock); 2275 mutex_unlock(&gpio_lookup_lock);
2260} 2276}
2261 2277
2262/*
2263 * Caller must have a acquired gpio_lookup_lock
2264 */
2265static struct gpio_chip *find_chip_by_name(const char *name)
2266{
2267 struct gpio_chip *chip = NULL;
2268
2269 list_for_each_entry(chip, &gpio_lookup_list, list) {
2270 if (chip->label == NULL)
2271 continue;
2272 if (!strcmp(chip->label, name))
2273 break;
2274 }
2275
2276 return chip;
2277}
2278
2279#ifdef CONFIG_OF 2278#ifdef CONFIG_OF
2280static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, 2279static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
2281 unsigned int idx, unsigned long *flags) 2280 unsigned int idx,
2281 enum gpio_lookup_flags *flags)
2282{ 2282{
2283 char prop_name[32]; /* 32 is max size of property name */ 2283 char prop_name[32]; /* 32 is max size of property name */
2284 enum of_gpio_flags of_flags; 2284 enum of_gpio_flags of_flags;
@@ -2296,20 +2296,22 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
2296 return desc; 2296 return desc;
2297 2297
2298 if (of_flags & OF_GPIO_ACTIVE_LOW) 2298 if (of_flags & OF_GPIO_ACTIVE_LOW)
2299 *flags |= GPIOF_ACTIVE_LOW; 2299 *flags |= GPIO_ACTIVE_LOW;
2300 2300
2301 return desc; 2301 return desc;
2302} 2302}
2303#else 2303#else
2304static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, 2304static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
2305 unsigned int idx, unsigned long *flags) 2305 unsigned int idx,
2306 enum gpio_lookup_flags *flags)
2306{ 2307{
2307 return ERR_PTR(-ENODEV); 2308 return ERR_PTR(-ENODEV);
2308} 2309}
2309#endif 2310#endif
2310 2311
2311static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id, 2312static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
2312 unsigned int idx, unsigned long *flags) 2313 unsigned int idx,
2314 enum gpio_lookup_flags *flags)
2313{ 2315{
2314 struct acpi_gpio_info info; 2316 struct acpi_gpio_info info;
2315 struct gpio_desc *desc; 2317 struct gpio_desc *desc;
@@ -2319,13 +2321,14 @@ static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
2319 return desc; 2321 return desc;
2320 2322
2321 if (info.gpioint && info.active_low) 2323 if (info.gpioint && info.active_low)
2322 *flags |= GPIOF_ACTIVE_LOW; 2324 *flags |= GPIO_ACTIVE_LOW;
2323 2325
2324 return desc; 2326 return desc;
2325} 2327}
2326 2328
2327static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id, 2329static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
2328 unsigned int idx, unsigned long *flags) 2330 unsigned int idx,
2331 enum gpio_lookup_flags *flags)
2329{ 2332{
2330 const char *dev_id = dev ? dev_name(dev) : NULL; 2333 const char *dev_id = dev ? dev_name(dev) : NULL;
2331 struct gpio_desc *desc = ERR_PTR(-ENODEV); 2334 struct gpio_desc *desc = ERR_PTR(-ENODEV);
@@ -2365,7 +2368,7 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
2365 continue; 2368 continue;
2366 } 2369 }
2367 2370
2368 if (chip->ngpio >= p->chip_hwnum) { 2371 if (chip->ngpio <= p->chip_hwnum) {
2369 dev_warn(dev, "GPIO chip %s has %d GPIOs\n", 2372 dev_warn(dev, "GPIO chip %s has %d GPIOs\n",
2370 chip->label, chip->ngpio); 2373 chip->label, chip->ngpio);
2371 continue; 2374 continue;
@@ -2415,9 +2418,9 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
2415 const char *con_id, 2418 const char *con_id,
2416 unsigned int idx) 2419 unsigned int idx)
2417{ 2420{
2418 struct gpio_desc *desc; 2421 struct gpio_desc *desc = NULL;
2419 int status; 2422 int status;
2420 unsigned long flags = 0; 2423 enum gpio_lookup_flags flags = 0;
2421 2424
2422 dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id); 2425 dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
2423 2426
@@ -2428,13 +2431,23 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
2428 } else if (IS_ENABLED(CONFIG_ACPI) && dev && ACPI_HANDLE(dev)) { 2431 } else if (IS_ENABLED(CONFIG_ACPI) && dev && ACPI_HANDLE(dev)) {
2429 dev_dbg(dev, "using ACPI for GPIO lookup\n"); 2432 dev_dbg(dev, "using ACPI for GPIO lookup\n");
2430 desc = acpi_find_gpio(dev, con_id, idx, &flags); 2433 desc = acpi_find_gpio(dev, con_id, idx, &flags);
2431 } else { 2434 }
2435
2436 /*
2437 * Either we are not using DT or ACPI, or their lookup did not return
2438 * a result. In that case, use platform lookup as a fallback.
2439 */
2440 if (!desc || IS_ERR(desc)) {
2441 struct gpio_desc *pdesc;
2432 dev_dbg(dev, "using lookup tables for GPIO lookup"); 2442 dev_dbg(dev, "using lookup tables for GPIO lookup");
2433 desc = gpiod_find(dev, con_id, idx, &flags); 2443 pdesc = gpiod_find(dev, con_id, idx, &flags);
2444 /* If used as fallback, do not replace the previous error */
2445 if (!IS_ERR(pdesc) || !desc)
2446 desc = pdesc;
2434 } 2447 }
2435 2448
2436 if (IS_ERR(desc)) { 2449 if (IS_ERR(desc)) {
2437 dev_warn(dev, "lookup for GPIO %s failed\n", con_id); 2450 dev_dbg(dev, "lookup for GPIO %s failed\n", con_id);
2438 return desc; 2451 return desc;
2439 } 2452 }
2440 2453
@@ -2443,8 +2456,12 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
2443 if (status < 0) 2456 if (status < 0)
2444 return ERR_PTR(status); 2457 return ERR_PTR(status);
2445 2458
2446 if (flags & GPIOF_ACTIVE_LOW) 2459 if (flags & GPIO_ACTIVE_LOW)
2447 set_bit(FLAG_ACTIVE_LOW, &desc->flags); 2460 set_bit(FLAG_ACTIVE_LOW, &desc->flags);
2461 if (flags & GPIO_OPEN_DRAIN)
2462 set_bit(FLAG_OPEN_DRAIN, &desc->flags);
2463 if (flags & GPIO_OPEN_SOURCE)
2464 set_bit(FLAG_OPEN_SOURCE, &desc->flags);
2448 2465
2449 return desc; 2466 return desc;
2450} 2467}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index fb7cf0e796f6..0a1e4a5f4234 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2674,7 +2674,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
2674 int modes = 0; 2674 int modes = 0;
2675 u8 cea_mode; 2675 u8 cea_mode;
2676 2676
2677 if (video_db == NULL || video_index > video_len) 2677 if (video_db == NULL || video_index >= video_len)
2678 return 0; 2678 return 0;
2679 2679
2680 /* CEA modes are numbered 1..127 */ 2680 /* CEA modes are numbered 1..127 */
@@ -2701,7 +2701,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
2701 if (structure & (1 << 8)) { 2701 if (structure & (1 << 8)) {
2702 newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); 2702 newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
2703 if (newmode) { 2703 if (newmode) {
2704 newmode->flags = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF; 2704 newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
2705 drm_mode_probed_add(connector, newmode); 2705 drm_mode_probed_add(connector, newmode);
2706 modes++; 2706 modes++;
2707 } 2707 }
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 1a35ea53106b..c22c3097c3e8 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -489,6 +489,11 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
489} 489}
490EXPORT_SYMBOL(drm_sysfs_hotplug_event); 490EXPORT_SYMBOL(drm_sysfs_hotplug_event);
491 491
492static void drm_sysfs_release(struct device *dev)
493{
494 kfree(dev);
495}
496
492/** 497/**
493 * drm_sysfs_device_add - adds a class device to sysfs for a character driver 498 * drm_sysfs_device_add - adds a class device to sysfs for a character driver
494 * @dev: DRM device to be added 499 * @dev: DRM device to be added
@@ -501,6 +506,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
501int drm_sysfs_device_add(struct drm_minor *minor) 506int drm_sysfs_device_add(struct drm_minor *minor)
502{ 507{
503 char *minor_str; 508 char *minor_str;
509 int r;
504 510
505 if (minor->type == DRM_MINOR_CONTROL) 511 if (minor->type == DRM_MINOR_CONTROL)
506 minor_str = "controlD%d"; 512 minor_str = "controlD%d";
@@ -509,14 +515,34 @@ int drm_sysfs_device_add(struct drm_minor *minor)
509 else 515 else
510 minor_str = "card%d"; 516 minor_str = "card%d";
511 517
512 minor->kdev = device_create(drm_class, minor->dev->dev, 518 minor->kdev = kzalloc(sizeof(*minor->kdev), GFP_KERNEL);
513 MKDEV(DRM_MAJOR, minor->index), 519 if (!minor->kdev) {
514 minor, minor_str, minor->index); 520 r = -ENOMEM;
515 if (IS_ERR(minor->kdev)) { 521 goto error;
516 DRM_ERROR("device create failed %ld\n", PTR_ERR(minor->kdev));
517 return PTR_ERR(minor->kdev);
518 } 522 }
523
524 device_initialize(minor->kdev);
525 minor->kdev->devt = MKDEV(DRM_MAJOR, minor->index);
526 minor->kdev->class = drm_class;
527 minor->kdev->type = &drm_sysfs_device_minor;
528 minor->kdev->parent = minor->dev->dev;
529 minor->kdev->release = drm_sysfs_release;
530 dev_set_drvdata(minor->kdev, minor);
531
532 r = dev_set_name(minor->kdev, minor_str, minor->index);
533 if (r < 0)
534 goto error;
535
536 r = device_add(minor->kdev);
537 if (r < 0)
538 goto error;
539
519 return 0; 540 return 0;
541
542error:
543 DRM_ERROR("device create failed %d\n", r);
544 put_device(minor->kdev);
545 return r;
520} 546}
521 547
522/** 548/**
@@ -529,7 +555,7 @@ int drm_sysfs_device_add(struct drm_minor *minor)
529void drm_sysfs_device_remove(struct drm_minor *minor) 555void drm_sysfs_device_remove(struct drm_minor *minor)
530{ 556{
531 if (minor->kdev) 557 if (minor->kdev)
532 device_destroy(drm_class, MKDEV(DRM_MAJOR, minor->index)); 558 device_unregister(minor->kdev);
533 minor->kdev = NULL; 559 minor->kdev = NULL;
534} 560}
535 561
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index b676006a95a0..22b8f5eced80 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -173,28 +173,37 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
173static void exynos_drm_preclose(struct drm_device *dev, 173static void exynos_drm_preclose(struct drm_device *dev,
174 struct drm_file *file) 174 struct drm_file *file)
175{ 175{
176 exynos_drm_subdrv_close(dev, file);
177}
178
179static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
180{
176 struct exynos_drm_private *private = dev->dev_private; 181 struct exynos_drm_private *private = dev->dev_private;
177 struct drm_pending_vblank_event *e, *t; 182 struct drm_pending_vblank_event *v, *vt;
183 struct drm_pending_event *e, *et;
178 unsigned long flags; 184 unsigned long flags;
179 185
180 /* release events of current file */ 186 if (!file->driver_priv)
187 return;
188
189 /* Release all events not unhandled by page flip handler. */
181 spin_lock_irqsave(&dev->event_lock, flags); 190 spin_lock_irqsave(&dev->event_lock, flags);
182 list_for_each_entry_safe(e, t, &private->pageflip_event_list, 191 list_for_each_entry_safe(v, vt, &private->pageflip_event_list,
183 base.link) { 192 base.link) {
184 if (e->base.file_priv == file) { 193 if (v->base.file_priv == file) {
185 list_del(&e->base.link); 194 list_del(&v->base.link);
186 e->base.destroy(&e->base); 195 drm_vblank_put(dev, v->pipe);
196 v->base.destroy(&v->base);
187 } 197 }
188 } 198 }
189 spin_unlock_irqrestore(&dev->event_lock, flags);
190 199
191 exynos_drm_subdrv_close(dev, file); 200 /* Release all events handled by page flip handler but not freed. */
192} 201 list_for_each_entry_safe(e, et, &file->event_list, link) {
202 list_del(&e->link);
203 e->destroy(e);
204 }
205 spin_unlock_irqrestore(&dev->event_lock, flags);
193 206
194static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
195{
196 if (!file->driver_priv)
197 return;
198 207
199 kfree(file->driver_priv); 208 kfree(file->driver_priv);
200 file->driver_priv = NULL; 209 file->driver_priv = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 23da72b5eae9..a61878bf5dcd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -31,7 +31,7 @@
31#include "exynos_drm_iommu.h" 31#include "exynos_drm_iommu.h"
32 32
33/* 33/*
34 * FIMD is stand for Fully Interactive Mobile Display and 34 * FIMD stands for Fully Interactive Mobile Display and
35 * as a display controller, it transfers contents drawn on memory 35 * as a display controller, it transfers contents drawn on memory
36 * to a LCD Panel through Display Interfaces such as RGB or 36 * to a LCD Panel through Display Interfaces such as RGB or
37 * CPU Interface. 37 * CPU Interface.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 3271fd4b1724..7bccedca487a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -383,6 +383,8 @@ out:
383 g2d_userptr->npages, 383 g2d_userptr->npages,
384 g2d_userptr->vma); 384 g2d_userptr->vma);
385 385
386 exynos_gem_put_vma(g2d_userptr->vma);
387
386 if (!g2d_userptr->out_of_list) 388 if (!g2d_userptr->out_of_list)
387 list_del_init(&g2d_userptr->list); 389 list_del_init(&g2d_userptr->list);
388 390
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 989be12cdd6e..2e367a1c6a64 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -534,8 +534,10 @@ static int i915_drm_freeze(struct drm_device *dev)
534 * Disable CRTCs directly since we want to preserve sw state 534 * Disable CRTCs directly since we want to preserve sw state
535 * for _thaw. 535 * for _thaw.
536 */ 536 */
537 mutex_lock(&dev->mode_config.mutex);
537 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 538 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
538 dev_priv->display.crtc_disable(crtc); 539 dev_priv->display.crtc_disable(crtc);
540 mutex_unlock(&dev->mode_config.mutex);
539 541
540 intel_modeset_suspend_hw(dev); 542 intel_modeset_suspend_hw(dev);
541 } 543 }
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8600c315b4c4..ccdbecca070d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1816,6 +1816,7 @@ struct drm_i915_file_private {
1816#define HAS_POWER_WELL(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev)) 1816#define HAS_POWER_WELL(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
1817#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) 1817#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
1818#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev)) 1818#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
1819#define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */
1819 1820
1820#define INTEL_PCH_DEVICE_ID_MASK 0xff00 1821#define INTEL_PCH_DEVICE_ID_MASK 0xff00
1821#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 1822#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 12bbd5eac70d..621c7c67a643 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4442,10 +4442,9 @@ i915_gem_init_hw(struct drm_device *dev)
4442 if (dev_priv->ellc_size) 4442 if (dev_priv->ellc_size)
4443 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); 4443 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4444 4444
4445 if (IS_HSW_GT3(dev)) 4445 if (IS_HASWELL(dev))
4446 I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED); 4446 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4447 else 4447 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4448 I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
4449 4448
4450 if (HAS_PCH_NOP(dev)) { 4449 if (HAS_PCH_NOP(dev)) {
4451 u32 temp = I915_READ(GEN7_MSG_CTL); 4450 u32 temp = I915_READ(GEN7_MSG_CTL);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 7d5752fda5f1..9bb533e0d762 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -125,13 +125,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
125 125
126 ret = i915_gem_object_get_pages(obj); 126 ret = i915_gem_object_get_pages(obj);
127 if (ret) 127 if (ret)
128 goto error; 128 goto err;
129
130 i915_gem_object_pin_pages(obj);
129 131
130 ret = -ENOMEM; 132 ret = -ENOMEM;
131 133
132 pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages)); 134 pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
133 if (pages == NULL) 135 if (pages == NULL)
134 goto error; 136 goto err_unpin;
135 137
136 i = 0; 138 i = 0;
137 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) 139 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
@@ -141,15 +143,16 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
141 drm_free_large(pages); 143 drm_free_large(pages);
142 144
143 if (!obj->dma_buf_vmapping) 145 if (!obj->dma_buf_vmapping)
144 goto error; 146 goto err_unpin;
145 147
146 obj->vmapping_count = 1; 148 obj->vmapping_count = 1;
147 i915_gem_object_pin_pages(obj);
148out_unlock: 149out_unlock:
149 mutex_unlock(&dev->struct_mutex); 150 mutex_unlock(&dev->struct_mutex);
150 return obj->dma_buf_vmapping; 151 return obj->dma_buf_vmapping;
151 152
152error: 153err_unpin:
154 i915_gem_object_unpin_pages(obj);
155err:
153 mutex_unlock(&dev->struct_mutex); 156 mutex_unlock(&dev->struct_mutex);
154 return ERR_PTR(ret); 157 return ERR_PTR(ret);
155} 158}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 885d595e0e02..b7e787fb4649 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,6 +33,9 @@
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include <linux/dma_remapping.h> 34#include <linux/dma_remapping.h>
35 35
36#define __EXEC_OBJECT_HAS_PIN (1<<31)
37#define __EXEC_OBJECT_HAS_FENCE (1<<30)
38
36struct eb_vmas { 39struct eb_vmas {
37 struct list_head vmas; 40 struct list_head vmas;
38 int and; 41 int and;
@@ -187,7 +190,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
187 } 190 }
188} 191}
189 192
190static void eb_destroy(struct eb_vmas *eb) { 193static void
194i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
195{
196 struct drm_i915_gem_exec_object2 *entry;
197 struct drm_i915_gem_object *obj = vma->obj;
198
199 if (!drm_mm_node_allocated(&vma->node))
200 return;
201
202 entry = vma->exec_entry;
203
204 if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
205 i915_gem_object_unpin_fence(obj);
206
207 if (entry->flags & __EXEC_OBJECT_HAS_PIN)
208 i915_gem_object_unpin(obj);
209
210 entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
211}
212
213static void eb_destroy(struct eb_vmas *eb)
214{
191 while (!list_empty(&eb->vmas)) { 215 while (!list_empty(&eb->vmas)) {
192 struct i915_vma *vma; 216 struct i915_vma *vma;
193 217
@@ -195,6 +219,7 @@ static void eb_destroy(struct eb_vmas *eb) {
195 struct i915_vma, 219 struct i915_vma,
196 exec_list); 220 exec_list);
197 list_del_init(&vma->exec_list); 221 list_del_init(&vma->exec_list);
222 i915_gem_execbuffer_unreserve_vma(vma);
198 drm_gem_object_unreference(&vma->obj->base); 223 drm_gem_object_unreference(&vma->obj->base);
199 } 224 }
200 kfree(eb); 225 kfree(eb);
@@ -478,9 +503,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb,
478 return ret; 503 return ret;
479} 504}
480 505
481#define __EXEC_OBJECT_HAS_PIN (1<<31)
482#define __EXEC_OBJECT_HAS_FENCE (1<<30)
483
484static int 506static int
485need_reloc_mappable(struct i915_vma *vma) 507need_reloc_mappable(struct i915_vma *vma)
486{ 508{
@@ -552,26 +574,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
552 return 0; 574 return 0;
553} 575}
554 576
555static void
556i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
557{
558 struct drm_i915_gem_exec_object2 *entry;
559 struct drm_i915_gem_object *obj = vma->obj;
560
561 if (!drm_mm_node_allocated(&vma->node))
562 return;
563
564 entry = vma->exec_entry;
565
566 if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
567 i915_gem_object_unpin_fence(obj);
568
569 if (entry->flags & __EXEC_OBJECT_HAS_PIN)
570 i915_gem_object_unpin(obj);
571
572 entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
573}
574
575static int 577static int
576i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, 578i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
577 struct list_head *vmas, 579 struct list_head *vmas,
@@ -670,13 +672,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
670 goto err; 672 goto err;
671 } 673 }
672 674
673err: /* Decrement pin count for bound objects */ 675err:
674 list_for_each_entry(vma, vmas, exec_list)
675 i915_gem_execbuffer_unreserve_vma(vma);
676
677 if (ret != -ENOSPC || retry++) 676 if (ret != -ENOSPC || retry++)
678 return ret; 677 return ret;
679 678
679 /* Decrement pin count for bound objects */
680 list_for_each_entry(vma, vmas, exec_list)
681 i915_gem_execbuffer_unreserve_vma(vma);
682
680 ret = i915_gem_evict_vm(vm, true); 683 ret = i915_gem_evict_vm(vm, true);
681 if (ret) 684 if (ret)
682 return ret; 685 return ret;
@@ -708,6 +711,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
708 while (!list_empty(&eb->vmas)) { 711 while (!list_empty(&eb->vmas)) {
709 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list); 712 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
710 list_del_init(&vma->exec_list); 713 list_del_init(&vma->exec_list);
714 i915_gem_execbuffer_unreserve_vma(vma);
711 drm_gem_object_unreference(&vma->obj->base); 715 drm_gem_object_unreference(&vma->obj->base);
712 } 716 }
713 717
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3620a1b0a73c..38cb8d44a013 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -57,7 +57,9 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
57#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2) 57#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
58#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) 58#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
59#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) 59#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
60#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
60#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) 61#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
62#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
61 63
62#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t)) 64#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
63#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t)) 65#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
@@ -185,10 +187,10 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
185 case I915_CACHE_NONE: 187 case I915_CACHE_NONE:
186 break; 188 break;
187 case I915_CACHE_WT: 189 case I915_CACHE_WT:
188 pte |= HSW_WT_ELLC_LLC_AGE0; 190 pte |= HSW_WT_ELLC_LLC_AGE3;
189 break; 191 break;
190 default: 192 default:
191 pte |= HSW_WB_ELLC_LLC_AGE0; 193 pte |= HSW_WB_ELLC_LLC_AGE3;
192 break; 194 break;
193 } 195 }
194 196
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f9eafb6ed523..ee2742122a02 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -235,6 +235,7 @@
235 */ 235 */
236#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) 236#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
237#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1) 237#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
238#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
238#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ 239#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
239#define MI_FLUSH_DW_STORE_INDEX (1<<21) 240#define MI_FLUSH_DW_STORE_INDEX (1<<21)
240#define MI_INVALIDATE_TLB (1<<18) 241#define MI_INVALIDATE_TLB (1<<18)
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 43959edd4291..dfff0907f70e 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -196,7 +196,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
196 acpi_handle dhandle; 196 acpi_handle dhandle;
197 int ret; 197 int ret;
198 198
199 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); 199 dhandle = ACPI_HANDLE(&pdev->dev);
200 if (!dhandle) 200 if (!dhandle)
201 return false; 201 return false;
202 202
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 6dd622d733b9..e4fba39631a5 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -790,7 +790,12 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
790 790
791 /* Default to using SSC */ 791 /* Default to using SSC */
792 dev_priv->vbt.lvds_use_ssc = 1; 792 dev_priv->vbt.lvds_use_ssc = 1;
793 dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); 793 /*
794 * Core/SandyBridge/IvyBridge use alternative (120MHz) reference
795 * clock for LVDS.
796 */
797 dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev,
798 !HAS_PCH_SPLIT(dev));
794 DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq); 799 DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
795 800
796 for (port = PORT_A; port < I915_MAX_PORTS; port++) { 801 for (port = PORT_A; port < I915_MAX_PORTS; port++) {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 1591576a6101..526c8ded16b0 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -173,7 +173,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
173 ddi_translations = ddi_translations_dp; 173 ddi_translations = ddi_translations_dp;
174 break; 174 break;
175 case PORT_D: 175 case PORT_D:
176 if (intel_dpd_is_edp(dev)) 176 if (intel_dp_is_edp(dev, PORT_D))
177 ddi_translations = ddi_translations_edp; 177 ddi_translations = ddi_translations_edp;
178 else 178 else
179 ddi_translations = ddi_translations_dp; 179 ddi_translations = ddi_translations_dp;
@@ -1158,9 +1158,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
1158 if (wait) 1158 if (wait)
1159 intel_wait_ddi_buf_idle(dev_priv, port); 1159 intel_wait_ddi_buf_idle(dev_priv, port);
1160 1160
1161 if (type == INTEL_OUTPUT_EDP) { 1161 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
1162 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1162 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1163 ironlake_edp_panel_vdd_on(intel_dp); 1163 ironlake_edp_panel_vdd_on(intel_dp);
1164 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
1164 ironlake_edp_panel_off(intel_dp); 1165 ironlake_edp_panel_off(intel_dp);
1165 } 1166 }
1166 1167
@@ -1406,6 +1407,26 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
1406 default: 1407 default:
1407 break; 1408 break;
1408 } 1409 }
1410
1411 if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
1412 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
1413 /*
1414 * This is a big fat ugly hack.
1415 *
1416 * Some machines in UEFI boot mode provide us a VBT that has 18
1417 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
1418 * unknown we fail to light up. Yet the same BIOS boots up with
1419 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
1420 * max, not what it tells us to use.
1421 *
1422 * Note: This will still be broken if the eDP panel is not lit
1423 * up by the BIOS, and thus we can't get the mode at module
1424 * load.
1425 */
1426 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
1427 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
1428 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
1429 }
1409} 1430}
1410 1431
1411static void intel_ddi_destroy(struct drm_encoder *encoder) 1432static void intel_ddi_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3cddd508d110..080f6fd4e839 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5815,7 +5815,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
5815 uint16_t postoff = 0; 5815 uint16_t postoff = 0;
5816 5816
5817 if (intel_crtc->config.limited_color_range) 5817 if (intel_crtc->config.limited_color_range)
5818 postoff = (16 * (1 << 13) / 255) & 0x1fff; 5818 postoff = (16 * (1 << 12) / 255) & 0x1fff;
5819 5819
5820 I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); 5820 I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
5821 I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); 5821 I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
@@ -6402,7 +6402,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6402 6402
6403 /* Make sure we're not on PC8 state before disabling PC8, otherwise 6403 /* Make sure we're not on PC8 state before disabling PC8, otherwise
6404 * we'll hang the machine! */ 6404 * we'll hang the machine! */
6405 dev_priv->uncore.funcs.force_wake_get(dev_priv); 6405 gen6_gt_force_wake_get(dev_priv);
6406 6406
6407 if (val & LCPLL_POWER_DOWN_ALLOW) { 6407 if (val & LCPLL_POWER_DOWN_ALLOW) {
6408 val &= ~LCPLL_POWER_DOWN_ALLOW; 6408 val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -6436,7 +6436,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6436 DRM_ERROR("Switching back to LCPLL failed\n"); 6436 DRM_ERROR("Switching back to LCPLL failed\n");
6437 } 6437 }
6438 6438
6439 dev_priv->uncore.funcs.force_wake_put(dev_priv); 6439 gen6_gt_force_wake_put(dev_priv);
6440} 6440}
6441 6441
6442void hsw_enable_pc8_work(struct work_struct *__work) 6442void hsw_enable_pc8_work(struct work_struct *__work)
@@ -6518,6 +6518,9 @@ static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
6518 6518
6519void hsw_enable_package_c8(struct drm_i915_private *dev_priv) 6519void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6520{ 6520{
6521 if (!HAS_PC8(dev_priv->dev))
6522 return;
6523
6521 mutex_lock(&dev_priv->pc8.lock); 6524 mutex_lock(&dev_priv->pc8.lock);
6522 __hsw_enable_package_c8(dev_priv); 6525 __hsw_enable_package_c8(dev_priv);
6523 mutex_unlock(&dev_priv->pc8.lock); 6526 mutex_unlock(&dev_priv->pc8.lock);
@@ -6525,6 +6528,9 @@ void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6525 6528
6526void hsw_disable_package_c8(struct drm_i915_private *dev_priv) 6529void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
6527{ 6530{
6531 if (!HAS_PC8(dev_priv->dev))
6532 return;
6533
6528 mutex_lock(&dev_priv->pc8.lock); 6534 mutex_lock(&dev_priv->pc8.lock);
6529 __hsw_disable_package_c8(dev_priv); 6535 __hsw_disable_package_c8(dev_priv);
6530 mutex_unlock(&dev_priv->pc8.lock); 6536 mutex_unlock(&dev_priv->pc8.lock);
@@ -6562,6 +6568,9 @@ static void hsw_update_package_c8(struct drm_device *dev)
6562 struct drm_i915_private *dev_priv = dev->dev_private; 6568 struct drm_i915_private *dev_priv = dev->dev_private;
6563 bool allow; 6569 bool allow;
6564 6570
6571 if (!HAS_PC8(dev_priv->dev))
6572 return;
6573
6565 if (!i915_enable_pc8) 6574 if (!i915_enable_pc8)
6566 return; 6575 return;
6567 6576
@@ -6585,18 +6594,28 @@ done:
6585 6594
6586static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv) 6595static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
6587{ 6596{
6597 if (!HAS_PC8(dev_priv->dev))
6598 return;
6599
6600 mutex_lock(&dev_priv->pc8.lock);
6588 if (!dev_priv->pc8.gpu_idle) { 6601 if (!dev_priv->pc8.gpu_idle) {
6589 dev_priv->pc8.gpu_idle = true; 6602 dev_priv->pc8.gpu_idle = true;
6590 hsw_enable_package_c8(dev_priv); 6603 __hsw_enable_package_c8(dev_priv);
6591 } 6604 }
6605 mutex_unlock(&dev_priv->pc8.lock);
6592} 6606}
6593 6607
6594static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv) 6608static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
6595{ 6609{
6610 if (!HAS_PC8(dev_priv->dev))
6611 return;
6612
6613 mutex_lock(&dev_priv->pc8.lock);
6596 if (dev_priv->pc8.gpu_idle) { 6614 if (dev_priv->pc8.gpu_idle) {
6597 dev_priv->pc8.gpu_idle = false; 6615 dev_priv->pc8.gpu_idle = false;
6598 hsw_disable_package_c8(dev_priv); 6616 __hsw_disable_package_c8(dev_priv);
6599 } 6617 }
6618 mutex_unlock(&dev_priv->pc8.lock);
6600} 6619}
6601 6620
6602#define for_each_power_domain(domain, mask) \ 6621#define for_each_power_domain(domain, mask) \
@@ -7184,7 +7203,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
7184 intel_crtc->cursor_visible = visible; 7203 intel_crtc->cursor_visible = visible;
7185 } 7204 }
7186 /* and commit changes on next vblank */ 7205 /* and commit changes on next vblank */
7206 POSTING_READ(CURCNTR(pipe));
7187 I915_WRITE(CURBASE(pipe), base); 7207 I915_WRITE(CURBASE(pipe), base);
7208 POSTING_READ(CURBASE(pipe));
7188} 7209}
7189 7210
7190static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) 7211static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
@@ -7213,7 +7234,9 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
7213 intel_crtc->cursor_visible = visible; 7234 intel_crtc->cursor_visible = visible;
7214 } 7235 }
7215 /* and commit changes on next vblank */ 7236 /* and commit changes on next vblank */
7237 POSTING_READ(CURCNTR_IVB(pipe));
7216 I915_WRITE(CURBASE_IVB(pipe), base); 7238 I915_WRITE(CURBASE_IVB(pipe), base);
7239 POSTING_READ(CURBASE_IVB(pipe));
7217} 7240}
7218 7241
7219/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ 7242/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@ -8331,7 +8354,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
8331 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | 8354 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
8332 DERRMR_PIPEB_PRI_FLIP_DONE | 8355 DERRMR_PIPEB_PRI_FLIP_DONE |
8333 DERRMR_PIPEC_PRI_FLIP_DONE)); 8356 DERRMR_PIPEC_PRI_FLIP_DONE));
8334 intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1)); 8357 intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
8358 MI_SRM_LRM_GLOBAL_GTT);
8335 intel_ring_emit(ring, DERRMR); 8359 intel_ring_emit(ring, DERRMR);
8336 intel_ring_emit(ring, ring->scratch.gtt_offset + 256); 8360 intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
8337 } 8361 }
@@ -9248,8 +9272,7 @@ check_crtc_state(struct drm_device *dev)
9248 enum pipe pipe; 9272 enum pipe pipe;
9249 if (encoder->base.crtc != &crtc->base) 9273 if (encoder->base.crtc != &crtc->base)
9250 continue; 9274 continue;
9251 if (encoder->get_config && 9275 if (encoder->get_hw_state(encoder, &pipe))
9252 encoder->get_hw_state(encoder, &pipe))
9253 encoder->get_config(encoder, &pipe_config); 9276 encoder->get_config(encoder, &pipe_config);
9254 } 9277 }
9255 9278
@@ -10027,7 +10050,7 @@ static void intel_setup_outputs(struct drm_device *dev)
10027 intel_ddi_init(dev, PORT_D); 10050 intel_ddi_init(dev, PORT_D);
10028 } else if (HAS_PCH_SPLIT(dev)) { 10051 } else if (HAS_PCH_SPLIT(dev)) {
10029 int found; 10052 int found;
10030 dpd_is_edp = intel_dpd_is_edp(dev); 10053 dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
10031 10054
10032 if (has_edp_a(dev)) 10055 if (has_edp_a(dev))
10033 intel_dp_init(dev, DP_A, PORT_A); 10056 intel_dp_init(dev, DP_A, PORT_A);
@@ -10064,8 +10087,7 @@ static void intel_setup_outputs(struct drm_device *dev)
10064 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, 10087 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
10065 PORT_C); 10088 PORT_C);
10066 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) 10089 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
10067 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, 10090 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
10068 PORT_C);
10069 } 10091 }
10070 10092
10071 intel_dsi_init(dev); 10093 intel_dsi_init(dev);
@@ -10909,8 +10931,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
10909 if (encoder->get_hw_state(encoder, &pipe)) { 10931 if (encoder->get_hw_state(encoder, &pipe)) {
10910 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 10932 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
10911 encoder->base.crtc = &crtc->base; 10933 encoder->base.crtc = &crtc->base;
10912 if (encoder->get_config) 10934 encoder->get_config(encoder, &crtc->config);
10913 encoder->get_config(encoder, &crtc->config);
10914 } else { 10935 } else {
10915 encoder->base.crtc = NULL; 10936 encoder->base.crtc = NULL;
10916 } 10937 }
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index eb8139da9763..30c627c7b7ba 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1774,7 +1774,7 @@ static void intel_disable_dp(struct intel_encoder *encoder)
1774 * ensure that we have vdd while we switch off the panel. */ 1774 * ensure that we have vdd while we switch off the panel. */
1775 ironlake_edp_panel_vdd_on(intel_dp); 1775 ironlake_edp_panel_vdd_on(intel_dp);
1776 ironlake_edp_backlight_off(intel_dp); 1776 ironlake_edp_backlight_off(intel_dp);
1777 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1777 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
1778 ironlake_edp_panel_off(intel_dp); 1778 ironlake_edp_panel_off(intel_dp);
1779 1779
1780 /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */ 1780 /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */
@@ -3326,11 +3326,19 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
3326} 3326}
3327 3327
3328/* check the VBT to see whether the eDP is on DP-D port */ 3328/* check the VBT to see whether the eDP is on DP-D port */
3329bool intel_dpd_is_edp(struct drm_device *dev) 3329bool intel_dp_is_edp(struct drm_device *dev, enum port port)
3330{ 3330{
3331 struct drm_i915_private *dev_priv = dev->dev_private; 3331 struct drm_i915_private *dev_priv = dev->dev_private;
3332 union child_device_config *p_child; 3332 union child_device_config *p_child;
3333 int i; 3333 int i;
3334 static const short port_mapping[] = {
3335 [PORT_B] = PORT_IDPB,
3336 [PORT_C] = PORT_IDPC,
3337 [PORT_D] = PORT_IDPD,
3338 };
3339
3340 if (port == PORT_A)
3341 return true;
3334 3342
3335 if (!dev_priv->vbt.child_dev_num) 3343 if (!dev_priv->vbt.child_dev_num)
3336 return false; 3344 return false;
@@ -3338,7 +3346,7 @@ bool intel_dpd_is_edp(struct drm_device *dev)
3338 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { 3346 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
3339 p_child = dev_priv->vbt.child_dev + i; 3347 p_child = dev_priv->vbt.child_dev + i;
3340 3348
3341 if (p_child->common.dvo_port == PORT_IDPD && 3349 if (p_child->common.dvo_port == port_mapping[port] &&
3342 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) == 3350 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
3343 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS)) 3351 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
3344 return true; 3352 return true;
@@ -3616,26 +3624,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
3616 intel_dp->DP = I915_READ(intel_dp->output_reg); 3624 intel_dp->DP = I915_READ(intel_dp->output_reg);
3617 intel_dp->attached_connector = intel_connector; 3625 intel_dp->attached_connector = intel_connector;
3618 3626
3619 type = DRM_MODE_CONNECTOR_DisplayPort; 3627 if (intel_dp_is_edp(dev, port))
3620 /*
3621 * FIXME : We need to initialize built-in panels before external panels.
3622 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
3623 */
3624 switch (port) {
3625 case PORT_A:
3626 type = DRM_MODE_CONNECTOR_eDP; 3628 type = DRM_MODE_CONNECTOR_eDP;
3627 break; 3629 else
3628 case PORT_C: 3630 type = DRM_MODE_CONNECTOR_DisplayPort;
3629 if (IS_VALLEYVIEW(dev))
3630 type = DRM_MODE_CONNECTOR_eDP;
3631 break;
3632 case PORT_D:
3633 if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
3634 type = DRM_MODE_CONNECTOR_eDP;
3635 break;
3636 default: /* silence GCC warning */
3637 break;
3638 }
3639 3631
3640 /* 3632 /*
3641 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but 3633 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1e49aa8f5377..a18e88b3e425 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -708,7 +708,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder);
708void intel_dp_check_link_status(struct intel_dp *intel_dp); 708void intel_dp_check_link_status(struct intel_dp *intel_dp);
709bool intel_dp_compute_config(struct intel_encoder *encoder, 709bool intel_dp_compute_config(struct intel_encoder *encoder,
710 struct intel_crtc_config *pipe_config); 710 struct intel_crtc_config *pipe_config);
711bool intel_dpd_is_edp(struct drm_device *dev); 711bool intel_dp_is_edp(struct drm_device *dev, enum port port);
712void ironlake_edp_backlight_on(struct intel_dp *intel_dp); 712void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
713void ironlake_edp_backlight_off(struct intel_dp *intel_dp); 713void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
714void ironlake_edp_panel_on(struct intel_dp *intel_dp); 714void ironlake_edp_panel_on(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 1b2f41c3f191..6d69a9bad865 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -638,7 +638,7 @@ static void intel_didl_outputs(struct drm_device *dev)
638 u32 temp; 638 u32 temp;
639 int i = 0; 639 int i = 0;
640 640
641 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); 641 handle = ACPI_HANDLE(&dev->pdev->dev);
642 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) 642 if (!handle || acpi_bus_get_device(handle, &acpi_dev))
643 return; 643 return;
644 644
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0a07d7c9cafc..6e0d5e075b15 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1180,7 +1180,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
1180 1180
1181 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1181 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1182 clock = adjusted_mode->crtc_clock; 1182 clock = adjusted_mode->crtc_clock;
1183 htotal = adjusted_mode->htotal; 1183 htotal = adjusted_mode->crtc_htotal;
1184 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1184 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1185 pixel_size = crtc->fb->bits_per_pixel / 8; 1185 pixel_size = crtc->fb->bits_per_pixel / 8;
1186 1186
@@ -1267,7 +1267,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
1267 crtc = intel_get_crtc_for_plane(dev, plane); 1267 crtc = intel_get_crtc_for_plane(dev, plane);
1268 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1268 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1269 clock = adjusted_mode->crtc_clock; 1269 clock = adjusted_mode->crtc_clock;
1270 htotal = adjusted_mode->htotal; 1270 htotal = adjusted_mode->crtc_htotal;
1271 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1271 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1272 pixel_size = crtc->fb->bits_per_pixel / 8; 1272 pixel_size = crtc->fb->bits_per_pixel / 8;
1273 1273
@@ -1498,7 +1498,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
1498 const struct drm_display_mode *adjusted_mode = 1498 const struct drm_display_mode *adjusted_mode =
1499 &to_intel_crtc(crtc)->config.adjusted_mode; 1499 &to_intel_crtc(crtc)->config.adjusted_mode;
1500 int clock = adjusted_mode->crtc_clock; 1500 int clock = adjusted_mode->crtc_clock;
1501 int htotal = adjusted_mode->htotal; 1501 int htotal = adjusted_mode->crtc_htotal;
1502 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1502 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1503 int pixel_size = crtc->fb->bits_per_pixel / 8; 1503 int pixel_size = crtc->fb->bits_per_pixel / 8;
1504 unsigned long line_time_us; 1504 unsigned long line_time_us;
@@ -1624,8 +1624,8 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1624 const struct drm_display_mode *adjusted_mode = 1624 const struct drm_display_mode *adjusted_mode =
1625 &to_intel_crtc(enabled)->config.adjusted_mode; 1625 &to_intel_crtc(enabled)->config.adjusted_mode;
1626 int clock = adjusted_mode->crtc_clock; 1626 int clock = adjusted_mode->crtc_clock;
1627 int htotal = adjusted_mode->htotal; 1627 int htotal = adjusted_mode->crtc_htotal;
1628 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1628 int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
1629 int pixel_size = enabled->fb->bits_per_pixel / 8; 1629 int pixel_size = enabled->fb->bits_per_pixel / 8;
1630 unsigned long line_time_us; 1630 unsigned long line_time_us;
1631 int entries; 1631 int entries;
@@ -1776,7 +1776,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1776 crtc = intel_get_crtc_for_plane(dev, plane); 1776 crtc = intel_get_crtc_for_plane(dev, plane);
1777 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1777 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1778 clock = adjusted_mode->crtc_clock; 1778 clock = adjusted_mode->crtc_clock;
1779 htotal = adjusted_mode->htotal; 1779 htotal = adjusted_mode->crtc_htotal;
1780 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1780 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1781 pixel_size = crtc->fb->bits_per_pixel / 8; 1781 pixel_size = crtc->fb->bits_per_pixel / 8;
1782 1782
@@ -2469,8 +2469,9 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2469 /* The WM are computed with base on how long it takes to fill a single 2469 /* The WM are computed with base on how long it takes to fill a single
2470 * row at the given clock rate, multiplied by 8. 2470 * row at the given clock rate, multiplied by 8.
2471 * */ 2471 * */
2472 linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock); 2472 linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2473 ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, 2473 mode->crtc_clock);
2474 ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2474 intel_ddi_get_cdclk_freq(dev_priv)); 2475 intel_ddi_get_cdclk_freq(dev_priv));
2475 2476
2476 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | 2477 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
@@ -3888,7 +3889,7 @@ static void gen6_enable_rps(struct drm_device *dev)
3888 3889
3889 I915_WRITE(GEN6_RC_SLEEP, 0); 3890 I915_WRITE(GEN6_RC_SLEEP, 0);
3890 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); 3891 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
3891 if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) 3892 if (IS_IVYBRIDGE(dev))
3892 I915_WRITE(GEN6_RC6_THRESHOLD, 125000); 3893 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
3893 else 3894 else
3894 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); 3895 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 18c406246a2d..22cf0f4ba248 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -902,6 +902,13 @@ intel_tv_mode_valid(struct drm_connector *connector,
902} 902}
903 903
904 904
905static void
906intel_tv_get_config(struct intel_encoder *encoder,
907 struct intel_crtc_config *pipe_config)
908{
909 pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
910}
911
905static bool 912static bool
906intel_tv_compute_config(struct intel_encoder *encoder, 913intel_tv_compute_config(struct intel_encoder *encoder,
907 struct intel_crtc_config *pipe_config) 914 struct intel_crtc_config *pipe_config)
@@ -1621,6 +1628,7 @@ intel_tv_init(struct drm_device *dev)
1621 DRM_MODE_ENCODER_TVDAC); 1628 DRM_MODE_ENCODER_TVDAC);
1622 1629
1623 intel_encoder->compute_config = intel_tv_compute_config; 1630 intel_encoder->compute_config = intel_tv_compute_config;
1631 intel_encoder->get_config = intel_tv_get_config;
1624 intel_encoder->mode_set = intel_tv_mode_set; 1632 intel_encoder->mode_set = intel_tv_mode_set;
1625 intel_encoder->enable = intel_enable_tv; 1633 intel_encoder->enable = intel_enable_tv;
1626 intel_encoder->disable = intel_disable_tv; 1634 intel_encoder->disable = intel_disable_tv;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index f9883ceff946..0b02078a0b84 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -217,6 +217,19 @@ static void gen6_force_wake_work(struct work_struct *work)
217 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 217 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
218} 218}
219 219
220static void intel_uncore_forcewake_reset(struct drm_device *dev)
221{
222 struct drm_i915_private *dev_priv = dev->dev_private;
223
224 if (IS_VALLEYVIEW(dev)) {
225 vlv_force_wake_reset(dev_priv);
226 } else if (INTEL_INFO(dev)->gen >= 6) {
227 __gen6_gt_force_wake_reset(dev_priv);
228 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
229 __gen6_gt_force_wake_mt_reset(dev_priv);
230 }
231}
232
220void intel_uncore_early_sanitize(struct drm_device *dev) 233void intel_uncore_early_sanitize(struct drm_device *dev)
221{ 234{
222 struct drm_i915_private *dev_priv = dev->dev_private; 235 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -234,19 +247,8 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
234 dev_priv->ellc_size = 128; 247 dev_priv->ellc_size = 128;
235 DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size); 248 DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
236 } 249 }
237}
238 250
239static void intel_uncore_forcewake_reset(struct drm_device *dev) 251 intel_uncore_forcewake_reset(dev);
240{
241 struct drm_i915_private *dev_priv = dev->dev_private;
242
243 if (IS_VALLEYVIEW(dev)) {
244 vlv_force_wake_reset(dev_priv);
245 } else if (INTEL_INFO(dev)->gen >= 6) {
246 __gen6_gt_force_wake_reset(dev_priv);
247 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
248 __gen6_gt_force_wake_mt_reset(dev_priv);
249 }
250} 252}
251 253
252void intel_uncore_sanitize(struct drm_device *dev) 254void intel_uncore_sanitize(struct drm_device *dev)
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index edcf801613e6..b3fa1ba191b7 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -59,6 +59,7 @@ nouveau-y += core/subdev/clock/nv40.o
59nouveau-y += core/subdev/clock/nv50.o 59nouveau-y += core/subdev/clock/nv50.o
60nouveau-y += core/subdev/clock/nv84.o 60nouveau-y += core/subdev/clock/nv84.o
61nouveau-y += core/subdev/clock/nva3.o 61nouveau-y += core/subdev/clock/nva3.o
62nouveau-y += core/subdev/clock/nvaa.o
62nouveau-y += core/subdev/clock/nvc0.o 63nouveau-y += core/subdev/clock/nvc0.o
63nouveau-y += core/subdev/clock/nve0.o 64nouveau-y += core/subdev/clock/nve0.o
64nouveau-y += core/subdev/clock/pllnv04.o 65nouveau-y += core/subdev/clock/pllnv04.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index db139827047c..db3fc7be856a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -283,7 +283,7 @@ nv50_identify(struct nouveau_device *device)
283 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 283 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
284 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 284 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
285 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 285 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
286 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; 286 device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
287 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 287 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
288 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 288 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
289 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 289 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
@@ -311,7 +311,7 @@ nv50_identify(struct nouveau_device *device)
311 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 311 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
312 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 312 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
313 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 313 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
314 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; 314 device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
315 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 315 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
316 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 316 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
317 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 317 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 5f555788121c..e6352bd5b4ff 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -33,6 +33,7 @@
33#include <engine/dmaobj.h> 33#include <engine/dmaobj.h>
34#include <engine/fifo.h> 34#include <engine/fifo.h>
35 35
36#include "nv04.h"
36#include "nv50.h" 37#include "nv50.h"
37 38
38/******************************************************************************* 39/*******************************************************************************
@@ -460,6 +461,8 @@ nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
460 nv_subdev(priv)->intr = nv04_fifo_intr; 461 nv_subdev(priv)->intr = nv04_fifo_intr;
461 nv_engine(priv)->cclass = &nv50_fifo_cclass; 462 nv_engine(priv)->cclass = &nv50_fifo_cclass;
462 nv_engine(priv)->sclass = nv50_fifo_sclass; 463 nv_engine(priv)->sclass = nv50_fifo_sclass;
464 priv->base.pause = nv04_fifo_pause;
465 priv->base.start = nv04_fifo_start;
463 return 0; 466 return 0;
464} 467}
465 468
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 0908dc834c84..fe0f41e65d9b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -35,6 +35,7 @@
35#include <engine/dmaobj.h> 35#include <engine/dmaobj.h>
36#include <engine/fifo.h> 36#include <engine/fifo.h>
37 37
38#include "nv04.h"
38#include "nv50.h" 39#include "nv50.h"
39 40
40/******************************************************************************* 41/*******************************************************************************
@@ -432,6 +433,8 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
432 nv_subdev(priv)->intr = nv04_fifo_intr; 433 nv_subdev(priv)->intr = nv04_fifo_intr;
433 nv_engine(priv)->cclass = &nv84_fifo_cclass; 434 nv_engine(priv)->cclass = &nv84_fifo_cclass;
434 nv_engine(priv)->sclass = nv84_fifo_sclass; 435 nv_engine(priv)->sclass = nv84_fifo_sclass;
436 priv->base.pause = nv04_fifo_pause;
437 priv->base.start = nv04_fifo_start;
435 return 0; 438 return 0;
436} 439}
437 440
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index b574dd4bb828..5ce686ee729e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -176,7 +176,7 @@ nv50_software_context_ctor(struct nouveau_object *parent,
176 if (ret) 176 if (ret)
177 return ret; 177 return ret;
178 178
179 chan->vblank.nr_event = pdisp->vblank->index_nr; 179 chan->vblank.nr_event = pdisp ? pdisp->vblank->index_nr : 0;
180 chan->vblank.event = kzalloc(chan->vblank.nr_event * 180 chan->vblank.event = kzalloc(chan->vblank.nr_event *
181 sizeof(*chan->vblank.event), GFP_KERNEL); 181 sizeof(*chan->vblank.event), GFP_KERNEL);
182 if (!chan->vblank.event) 182 if (!chan->vblank.event)
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index e2675bc0edba..8f4ced75444a 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -14,6 +14,9 @@ enum nv_clk_src {
14 nv_clk_src_hclk, 14 nv_clk_src_hclk,
15 nv_clk_src_hclkm3, 15 nv_clk_src_hclkm3,
16 nv_clk_src_hclkm3d2, 16 nv_clk_src_hclkm3d2,
17 nv_clk_src_hclkm2d3, /* NVAA */
18 nv_clk_src_hclkm4, /* NVAA */
19 nv_clk_src_cclk, /* NVAA */
17 20
18 nv_clk_src_host, 21 nv_clk_src_host,
19 22
@@ -127,6 +130,7 @@ extern struct nouveau_oclass nv04_clock_oclass;
127extern struct nouveau_oclass nv40_clock_oclass; 130extern struct nouveau_oclass nv40_clock_oclass;
128extern struct nouveau_oclass *nv50_clock_oclass; 131extern struct nouveau_oclass *nv50_clock_oclass;
129extern struct nouveau_oclass *nv84_clock_oclass; 132extern struct nouveau_oclass *nv84_clock_oclass;
133extern struct nouveau_oclass *nvaa_clock_oclass;
130extern struct nouveau_oclass nva3_clock_oclass; 134extern struct nouveau_oclass nva3_clock_oclass;
131extern struct nouveau_oclass nvc0_clock_oclass; 135extern struct nouveau_oclass nvc0_clock_oclass;
132extern struct nouveau_oclass nve0_clock_oclass; 136extern struct nouveau_oclass nve0_clock_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
index da50c1b12928..30c1f3a4158e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -69,6 +69,11 @@ nv04_clock_pll_prog(struct nouveau_clock *clk, u32 reg1,
69 return 0; 69 return 0;
70} 70}
71 71
72static struct nouveau_clocks
73nv04_domain[] = {
74 { nv_clk_src_max }
75};
76
72static int 77static int
73nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 78nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
74 struct nouveau_oclass *oclass, void *data, u32 size, 79 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -77,7 +82,7 @@ nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
77 struct nv04_clock_priv *priv; 82 struct nv04_clock_priv *priv;
78 int ret; 83 int ret;
79 84
80 ret = nouveau_clock_create(parent, engine, oclass, NULL, &priv); 85 ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, &priv);
81 *pobject = nv_object(priv); 86 *pobject = nv_object(priv);
82 if (ret) 87 if (ret)
83 return ret; 88 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
new file mode 100644
index 000000000000..7a723b4f564d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
@@ -0,0 +1,445 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <engine/fifo.h>
26#include <subdev/bios.h>
27#include <subdev/bios/pll.h>
28#include <subdev/timer.h>
29#include <subdev/clock.h>
30
31#include "pll.h"
32
33struct nvaa_clock_priv {
34 struct nouveau_clock base;
35 enum nv_clk_src csrc, ssrc, vsrc;
36 u32 cctrl, sctrl;
37 u32 ccoef, scoef;
38 u32 cpost, spost;
39 u32 vdiv;
40};
41
42static u32
43read_div(struct nouveau_clock *clk)
44{
45 return nv_rd32(clk, 0x004600);
46}
47
48static u32
49read_pll(struct nouveau_clock *clk, u32 base)
50{
51 u32 ctrl = nv_rd32(clk, base + 0);
52 u32 coef = nv_rd32(clk, base + 4);
53 u32 ref = clk->read(clk, nv_clk_src_href);
54 u32 post_div = 0;
55 u32 clock = 0;
56 int N1, M1;
57
58 switch (base){
59 case 0x4020:
60 post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16);
61 break;
62 case 0x4028:
63 post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16;
64 break;
65 default:
66 break;
67 }
68
69 N1 = (coef & 0x0000ff00) >> 8;
70 M1 = (coef & 0x000000ff);
71 if ((ctrl & 0x80000000) && M1) {
72 clock = ref * N1 / M1;
73 clock = clock / post_div;
74 }
75
76 return clock;
77}
78
79static int
80nvaa_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
81{
82 struct nvaa_clock_priv *priv = (void *)clk;
83 u32 mast = nv_rd32(clk, 0x00c054);
84 u32 P = 0;
85
86 switch (src) {
87 case nv_clk_src_crystal:
88 return nv_device(priv)->crystal;
89 case nv_clk_src_href:
90 return 100000; /* PCIE reference clock */
91 case nv_clk_src_hclkm4:
92 return clk->read(clk, nv_clk_src_href) * 4;
93 case nv_clk_src_hclkm2d3:
94 return clk->read(clk, nv_clk_src_href) * 2 / 3;
95 case nv_clk_src_host:
96 switch (mast & 0x000c0000) {
97 case 0x00000000: return clk->read(clk, nv_clk_src_hclkm2d3);
98 case 0x00040000: break;
99 case 0x00080000: return clk->read(clk, nv_clk_src_hclkm4);
100 case 0x000c0000: return clk->read(clk, nv_clk_src_cclk);
101 }
102 break;
103 case nv_clk_src_core:
104 P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
105
106 switch (mast & 0x00000003) {
107 case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
108 case 0x00000001: return 0;
109 case 0x00000002: return clk->read(clk, nv_clk_src_hclkm4) >> P;
110 case 0x00000003: return read_pll(clk, 0x004028) >> P;
111 }
112 break;
113 case nv_clk_src_cclk:
114 if ((mast & 0x03000000) != 0x03000000)
115 return clk->read(clk, nv_clk_src_core);
116
117 if ((mast & 0x00000200) == 0x00000000)
118 return clk->read(clk, nv_clk_src_core);
119
120 switch (mast & 0x00000c00) {
121 case 0x00000000: return clk->read(clk, nv_clk_src_href);
122 case 0x00000400: return clk->read(clk, nv_clk_src_hclkm4);
123 case 0x00000800: return clk->read(clk, nv_clk_src_hclkm2d3);
124 default: return 0;
125 }
126 case nv_clk_src_shader:
127 P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
128 switch (mast & 0x00000030) {
129 case 0x00000000:
130 if (mast & 0x00000040)
131 return clk->read(clk, nv_clk_src_href) >> P;
132 return clk->read(clk, nv_clk_src_crystal) >> P;
133 case 0x00000010: break;
134 case 0x00000020: return read_pll(clk, 0x004028) >> P;
135 case 0x00000030: return read_pll(clk, 0x004020) >> P;
136 }
137 break;
138 case nv_clk_src_mem:
139 return 0;
140 break;
141 case nv_clk_src_vdec:
142 P = (read_div(clk) & 0x00000700) >> 8;
143
144 switch (mast & 0x00400000) {
145 case 0x00400000:
146 return clk->read(clk, nv_clk_src_core) >> P;
147 break;
148 default:
149 return 500000 >> P;
150 break;
151 }
152 break;
153 default:
154 break;
155 }
156
157 nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
158 return 0;
159}
160
161static u32
162calc_pll(struct nvaa_clock_priv *priv, u32 reg,
163 u32 clock, int *N, int *M, int *P)
164{
165 struct nouveau_bios *bios = nouveau_bios(priv);
166 struct nvbios_pll pll;
167 struct nouveau_clock *clk = &priv->base;
168 int ret;
169
170 ret = nvbios_pll_parse(bios, reg, &pll);
171 if (ret)
172 return 0;
173
174 pll.vco2.max_freq = 0;
175 pll.refclk = clk->read(clk, nv_clk_src_href);
176 if (!pll.refclk)
177 return 0;
178
179 return nv04_pll_calc(nv_subdev(priv), &pll, clock, N, M, NULL, NULL, P);
180}
181
182static inline u32
183calc_P(u32 src, u32 target, int *div)
184{
185 u32 clk0 = src, clk1 = src;
186 for (*div = 0; *div <= 7; (*div)++) {
187 if (clk0 <= target) {
188 clk1 = clk0 << (*div ? 1 : 0);
189 break;
190 }
191 clk0 >>= 1;
192 }
193
194 if (target - clk0 <= clk1 - target)
195 return clk0;
196 (*div)--;
197 return clk1;
198}
199
200static int
201nvaa_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
202{
203 struct nvaa_clock_priv *priv = (void *)clk;
204 const int shader = cstate->domain[nv_clk_src_shader];
205 const int core = cstate->domain[nv_clk_src_core];
206 const int vdec = cstate->domain[nv_clk_src_vdec];
207 u32 out = 0, clock = 0;
208 int N, M, P1, P2 = 0;
209 int divs = 0;
210
211 /* cclk: find suitable source, disable PLL if we can */
212 if (core < clk->read(clk, nv_clk_src_hclkm4))
213 out = calc_P(clk->read(clk, nv_clk_src_hclkm4), core, &divs);
214
215 /* Calculate clock * 2, so shader clock can use it too */
216 clock = calc_pll(priv, 0x4028, (core << 1), &N, &M, &P1);
217
218 if (abs(core - out) <=
219 abs(core - (clock >> 1))) {
220 priv->csrc = nv_clk_src_hclkm4;
221 priv->cctrl = divs << 16;
222 } else {
223 /* NVCTRL is actually used _after_ NVPOST, and after what we
224 * call NVPLL. To make matters worse, NVPOST is an integer
225 * divider instead of a right-shift number. */
226 if(P1 > 2) {
227 P2 = P1 - 2;
228 P1 = 2;
229 }
230
231 priv->csrc = nv_clk_src_core;
232 priv->ccoef = (N << 8) | M;
233
234 priv->cctrl = (P2 + 1) << 16;
235 priv->cpost = (1 << P1) << 16;
236 }
237
238 /* sclk: nvpll + divisor, href or spll */
239 out = 0;
240 if (shader == clk->read(clk, nv_clk_src_href)) {
241 priv->ssrc = nv_clk_src_href;
242 } else {
243 clock = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
244 if (priv->csrc == nv_clk_src_core) {
245 out = calc_P((core << 1), shader, &divs);
246 }
247
248 if (abs(shader - out) <=
249 abs(shader - clock) &&
250 (divs + P2) <= 7) {
251 priv->ssrc = nv_clk_src_core;
252 priv->sctrl = (divs + P2) << 16;
253 } else {
254 priv->ssrc = nv_clk_src_shader;
255 priv->scoef = (N << 8) | M;
256 priv->sctrl = P1 << 16;
257 }
258 }
259
260 /* vclk */
261 out = calc_P(core, vdec, &divs);
262 clock = calc_P(500000, vdec, &P1);
263 if(abs(vdec - out) <=
264 abs(vdec - clock)) {
265 priv->vsrc = nv_clk_src_cclk;
266 priv->vdiv = divs << 16;
267 } else {
268 priv->vsrc = nv_clk_src_vdec;
269 priv->vdiv = P1 << 16;
270 }
271
272 /* Print strategy! */
273 nv_debug(priv, "nvpll: %08x %08x %08x\n",
274 priv->ccoef, priv->cpost, priv->cctrl);
275 nv_debug(priv, " spll: %08x %08x %08x\n",
276 priv->scoef, priv->spost, priv->sctrl);
277 nv_debug(priv, " vdiv: %08x\n", priv->vdiv);
278 if (priv->csrc == nv_clk_src_hclkm4)
279 nv_debug(priv, "core: hrefm4\n");
280 else
281 nv_debug(priv, "core: nvpll\n");
282
283 if (priv->ssrc == nv_clk_src_hclkm4)
284 nv_debug(priv, "shader: hrefm4\n");
285 else if (priv->ssrc == nv_clk_src_core)
286 nv_debug(priv, "shader: nvpll\n");
287 else
288 nv_debug(priv, "shader: spll\n");
289
290 if (priv->vsrc == nv_clk_src_hclkm4)
291 nv_debug(priv, "vdec: 500MHz\n");
292 else
293 nv_debug(priv, "vdec: core\n");
294
295 return 0;
296}
297
298static int
299nvaa_clock_prog(struct nouveau_clock *clk)
300{
301 struct nvaa_clock_priv *priv = (void *)clk;
302 struct nouveau_fifo *pfifo = nouveau_fifo(clk);
303 unsigned long flags;
304 u32 pllmask = 0, mast, ptherm_gate;
305 int ret = -EBUSY;
306
307 /* halt and idle execution engines */
308 ptherm_gate = nv_mask(clk, 0x020060, 0x00070000, 0x00000000);
309 nv_mask(clk, 0x002504, 0x00000001, 0x00000001);
310 /* Wait until the interrupt handler is finished */
311 if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000))
312 goto resume;
313
314 if (pfifo)
315 pfifo->pause(pfifo, &flags);
316
317 if (!nv_wait(clk, 0x002504, 0x00000010, 0x00000010))
318 goto resume;
319 if (!nv_wait(clk, 0x00251c, 0x0000003f, 0x0000003f))
320 goto resume;
321
322 /* First switch to safe clocks: href */
323 mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640);
324 mast &= ~0x00400e73;
325 mast |= 0x03000000;
326
327 switch (priv->csrc) {
328 case nv_clk_src_hclkm4:
329 nv_mask(clk, 0x4028, 0x00070000, priv->cctrl);
330 mast |= 0x00000002;
331 break;
332 case nv_clk_src_core:
333 nv_wr32(clk, 0x402c, priv->ccoef);
334 nv_wr32(clk, 0x4028, 0x80000000 | priv->cctrl);
335 nv_wr32(clk, 0x4040, priv->cpost);
336 pllmask |= (0x3 << 8);
337 mast |= 0x00000003;
338 break;
339 default:
340 nv_warn(priv,"Reclocking failed: unknown core clock\n");
341 goto resume;
342 }
343
344 switch (priv->ssrc) {
345 case nv_clk_src_href:
346 nv_mask(clk, 0x4020, 0x00070000, 0x00000000);
347 /* mast |= 0x00000000; */
348 break;
349 case nv_clk_src_core:
350 nv_mask(clk, 0x4020, 0x00070000, priv->sctrl);
351 mast |= 0x00000020;
352 break;
353 case nv_clk_src_shader:
354 nv_wr32(clk, 0x4024, priv->scoef);
355 nv_wr32(clk, 0x4020, 0x80000000 | priv->sctrl);
356 nv_wr32(clk, 0x4070, priv->spost);
357 pllmask |= (0x3 << 12);
358 mast |= 0x00000030;
359 break;
360 default:
361 nv_warn(priv,"Reclocking failed: unknown sclk clock\n");
362 goto resume;
363 }
364
365 if (!nv_wait(clk, 0x004080, pllmask, pllmask)) {
366 nv_warn(priv,"Reclocking failed: unstable PLLs\n");
367 goto resume;
368 }
369
370 switch (priv->vsrc) {
371 case nv_clk_src_cclk:
372 mast |= 0x00400000;
373 default:
374 nv_wr32(clk, 0x4600, priv->vdiv);
375 }
376
377 nv_wr32(clk, 0xc054, mast);
378 ret = 0;
379
380resume:
381 if (pfifo)
382 pfifo->start(pfifo, &flags);
383
384 nv_mask(clk, 0x002504, 0x00000001, 0x00000000);
385 nv_wr32(clk, 0x020060, ptherm_gate);
386
387 /* Disable some PLLs and dividers when unused */
388 if (priv->csrc != nv_clk_src_core) {
389 nv_wr32(clk, 0x4040, 0x00000000);
390 nv_mask(clk, 0x4028, 0x80000000, 0x00000000);
391 }
392
393 if (priv->ssrc != nv_clk_src_shader) {
394 nv_wr32(clk, 0x4070, 0x00000000);
395 nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
396 }
397
398 return ret;
399}
400
401static void
402nvaa_clock_tidy(struct nouveau_clock *clk)
403{
404}
405
406static struct nouveau_clocks
407nvaa_domains[] = {
408 { nv_clk_src_crystal, 0xff },
409 { nv_clk_src_href , 0xff },
410 { nv_clk_src_core , 0xff, 0, "core", 1000 },
411 { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
412 { nv_clk_src_vdec , 0xff, 0, "vdec", 1000 },
413 { nv_clk_src_max }
414};
415
416static int
417nvaa_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
418 struct nouveau_oclass *oclass, void *data, u32 size,
419 struct nouveau_object **pobject)
420{
421 struct nvaa_clock_priv *priv;
422 int ret;
423
424 ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, &priv);
425 *pobject = nv_object(priv);
426 if (ret)
427 return ret;
428
429 priv->base.read = nvaa_clock_read;
430 priv->base.calc = nvaa_clock_calc;
431 priv->base.prog = nvaa_clock_prog;
432 priv->base.tidy = nvaa_clock_tidy;
433 return 0;
434}
435
436struct nouveau_oclass *
437nvaa_clock_oclass = &(struct nouveau_oclass) {
438 .handle = NV_SUBDEV(CLOCK, 0xaa),
439 .ofuncs = &(struct nouveau_ofuncs) {
440 .ctor = nvaa_clock_ctor,
441 .dtor = _nouveau_clock_dtor,
442 .init = _nouveau_clock_init,
443 .fini = _nouveau_clock_fini,
444 },
445};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
index e286e132c7e7..129120473f6c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
@@ -116,7 +116,7 @@ mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version)
116 acpi_handle handle; 116 acpi_handle handle;
117 int ret; 117 int ret;
118 118
119 handle = DEVICE_ACPI_HANDLE(&device->pdev->dev); 119 handle = ACPI_HANDLE(&device->pdev->dev);
120 if (!handle) 120 if (!handle)
121 return false; 121 return false;
122 122
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 3618ac6b6316..32e7064b819b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -58,8 +58,8 @@ struct nouveau_plane {
58}; 58};
59 59
60static uint32_t formats[] = { 60static uint32_t formats[] = {
61 DRM_FORMAT_NV12,
62 DRM_FORMAT_UYVY, 61 DRM_FORMAT_UYVY,
62 DRM_FORMAT_NV12,
63}; 63};
64 64
65/* Sine can be approximated with 65/* Sine can be approximated with
@@ -99,13 +99,28 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
99 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 99 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
100 struct nouveau_bo *cur = nv_plane->cur; 100 struct nouveau_bo *cur = nv_plane->cur;
101 bool flip = nv_plane->flip; 101 bool flip = nv_plane->flip;
102 int format = ALIGN(src_w * 4, 0x100);
103 int soff = NV_PCRTC0_SIZE * nv_crtc->index; 102 int soff = NV_PCRTC0_SIZE * nv_crtc->index;
104 int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index; 103 int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
105 int ret; 104 int format, ret;
105
106 /* Source parameters given in 16.16 fixed point, ignore fractional. */
107 src_x >>= 16;
108 src_y >>= 16;
109 src_w >>= 16;
110 src_h >>= 16;
111
112 format = ALIGN(src_w * 4, 0x100);
106 113
107 if (format > 0xffff) 114 if (format > 0xffff)
108 return -EINVAL; 115 return -ERANGE;
116
117 if (dev->chipset >= 0x30) {
118 if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
119 return -ERANGE;
120 } else {
121 if (crtc_w < (src_w >> 3) || crtc_h < (src_h >> 3))
122 return -ERANGE;
123 }
109 124
110 ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM); 125 ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
111 if (ret) 126 if (ret)
@@ -113,12 +128,6 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
113 128
114 nv_plane->cur = nv_fb->nvbo; 129 nv_plane->cur = nv_fb->nvbo;
115 130
116 /* Source parameters given in 16.16 fixed point, ignore fractional. */
117 src_x = src_x >> 16;
118 src_y = src_y >> 16;
119 src_w = src_w >> 16;
120 src_h = src_h >> 16;
121
122 nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY); 131 nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
123 nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0); 132 nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
124 133
@@ -245,14 +254,25 @@ nv10_overlay_init(struct drm_device *device)
245{ 254{
246 struct nouveau_device *dev = nouveau_dev(device); 255 struct nouveau_device *dev = nouveau_dev(device);
247 struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL); 256 struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
257 int num_formats = ARRAY_SIZE(formats);
248 int ret; 258 int ret;
249 259
250 if (!plane) 260 if (!plane)
251 return; 261 return;
252 262
263 switch (dev->chipset) {
264 case 0x10:
265 case 0x11:
266 case 0x15:
267 case 0x1a:
268 case 0x20:
269 num_formats = 1;
270 break;
271 }
272
253 ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */, 273 ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */,
254 &nv10_plane_funcs, 274 &nv10_plane_funcs,
255 formats, ARRAY_SIZE(formats), false); 275 formats, num_formats, false);
256 if (ret) 276 if (ret)
257 goto err; 277 goto err;
258 278
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 07273a2ae62f..95c740454049 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -256,7 +256,7 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
256 acpi_handle dhandle; 256 acpi_handle dhandle;
257 int retval = 0; 257 int retval = 0;
258 258
259 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); 259 dhandle = ACPI_HANDLE(&pdev->dev);
260 if (!dhandle) 260 if (!dhandle)
261 return false; 261 return false;
262 262
@@ -414,7 +414,7 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
414 if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected) 414 if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
415 return false; 415 return false;
416 416
417 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); 417 dhandle = ACPI_HANDLE(&pdev->dev);
418 if (!dhandle) 418 if (!dhandle)
419 return false; 419 return false;
420 420
@@ -448,7 +448,7 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
448 return NULL; 448 return NULL;
449 } 449 }
450 450
451 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); 451 handle = ACPI_HANDLE(&dev->pdev->dev);
452 if (!handle) 452 if (!handle)
453 return NULL; 453 return NULL;
454 454
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7809d92183c4..29c3efdfc7dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -608,6 +608,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
608 fence = nouveau_fence_ref(new_bo->bo.sync_obj); 608 fence = nouveau_fence_ref(new_bo->bo.sync_obj);
609 spin_unlock(&new_bo->bo.bdev->fence_lock); 609 spin_unlock(&new_bo->bo.bdev->fence_lock);
610 ret = nouveau_fence_sync(fence, chan); 610 ret = nouveau_fence_sync(fence, chan);
611 nouveau_fence_unref(&fence);
611 if (ret) 612 if (ret)
612 return ret; 613 return ret;
613 614
@@ -701,7 +702,7 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
701 702
702 s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); 703 s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
703 if (s->event) 704 if (s->event)
704 drm_send_vblank_event(dev, -1, s->event); 705 drm_send_vblank_event(dev, s->crtc, s->event);
705 706
706 list_del(&s->head); 707 list_del(&s->head);
707 if (ps) 708 if (ps)
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 38a4db5bfe21..4aff04fa483c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -630,7 +630,6 @@ error:
630 hwmon->hwmon = NULL; 630 hwmon->hwmon = NULL;
631 return ret; 631 return ret;
632#else 632#else
633 hwmon->hwmon = NULL;
634 return 0; 633 return 0;
635#endif 634#endif
636} 635}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f8e66c08b11a..4e384a2f99c3 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1265,7 +1265,7 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
1265 uint32_t start, uint32_t size) 1265 uint32_t start, uint32_t size)
1266{ 1266{
1267 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 1267 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
1268 u32 end = max(start + size, (u32)256); 1268 u32 end = min_t(u32, start + size, 256);
1269 u32 i; 1269 u32 i;
1270 1270
1271 for (i = start; i < end; i++) { 1271 for (i = start; i < end; i++) {
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 0109a9644cb2..821ab7b9409b 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -92,6 +92,7 @@ qxl_release_free(struct qxl_device *qdev,
92 - DRM_FILE_OFFSET); 92 - DRM_FILE_OFFSET);
93 qxl_fence_remove_release(&bo->fence, release->id); 93 qxl_fence_remove_release(&bo->fence, release->id);
94 qxl_bo_unref(&bo); 94 qxl_bo_unref(&bo);
95 kfree(entry);
95 } 96 }
96 spin_lock(&qdev->release_idr_lock); 97 spin_lock(&qdev->release_idr_lock);
97 idr_remove(&qdev->release_idr, release->id); 98 idr_remove(&qdev->release_idr, release->id);
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index deaf98cdca3a..f685035dbe39 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -44,7 +44,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
44 PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args; 44 PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
45 int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction); 45 int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
46 unsigned char *base; 46 unsigned char *base;
47 u16 out; 47 u16 out = cpu_to_le16(0);
48 48
49 memset(&args, 0, sizeof(args)); 49 memset(&args, 0, sizeof(args));
50 50
@@ -55,9 +55,14 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
55 DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num); 55 DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
56 return -EINVAL; 56 return -EINVAL;
57 } 57 }
58 args.ucRegIndex = buf[0]; 58 if (buf == NULL)
59 if (num > 1) 59 args.ucRegIndex = 0;
60 memcpy(&out, &buf[1], num - 1); 60 else
61 args.ucRegIndex = buf[0];
62 if (num)
63 num--;
64 if (num)
65 memcpy(&out, &buf[1], num);
61 args.lpI2CDataOut = cpu_to_le16(out); 66 args.lpI2CDataOut = cpu_to_le16(out);
62 } else { 67 } else {
63 if (num > ATOM_MAX_HW_I2C_READ) { 68 if (num > ATOM_MAX_HW_I2C_READ) {
@@ -94,14 +99,14 @@ int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
94 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); 99 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
95 struct i2c_msg *p; 100 struct i2c_msg *p;
96 int i, remaining, current_count, buffer_offset, max_bytes, ret; 101 int i, remaining, current_count, buffer_offset, max_bytes, ret;
97 u8 buf = 0, flags; 102 u8 flags;
98 103
99 /* check for bus probe */ 104 /* check for bus probe */
100 p = &msgs[0]; 105 p = &msgs[0];
101 if ((num == 1) && (p->len == 0)) { 106 if ((num == 1) && (p->len == 0)) {
102 ret = radeon_process_i2c_ch(i2c, 107 ret = radeon_process_i2c_ch(i2c,
103 p->addr, HW_I2C_WRITE, 108 p->addr, HW_I2C_WRITE,
104 &buf, 1); 109 NULL, 0);
105 if (ret) 110 if (ret)
106 return ret; 111 return ret;
107 else 112 else
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index ae92aa041c6a..b43a3a3c9067 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1560,17 +1560,17 @@ u32 cik_get_xclk(struct radeon_device *rdev)
1560 * cik_mm_rdoorbell - read a doorbell dword 1560 * cik_mm_rdoorbell - read a doorbell dword
1561 * 1561 *
1562 * @rdev: radeon_device pointer 1562 * @rdev: radeon_device pointer
1563 * @offset: byte offset into the aperture 1563 * @index: doorbell index
1564 * 1564 *
1565 * Returns the value in the doorbell aperture at the 1565 * Returns the value in the doorbell aperture at the
1566 * requested offset (CIK). 1566 * requested doorbell index (CIK).
1567 */ 1567 */
1568u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset) 1568u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index)
1569{ 1569{
1570 if (offset < rdev->doorbell.size) { 1570 if (index < rdev->doorbell.num_doorbells) {
1571 return readl(((void __iomem *)rdev->doorbell.ptr) + offset); 1571 return readl(rdev->doorbell.ptr + index);
1572 } else { 1572 } else {
1573 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", offset); 1573 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
1574 return 0; 1574 return 0;
1575 } 1575 }
1576} 1576}
@@ -1579,18 +1579,18 @@ u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
1579 * cik_mm_wdoorbell - write a doorbell dword 1579 * cik_mm_wdoorbell - write a doorbell dword
1580 * 1580 *
1581 * @rdev: radeon_device pointer 1581 * @rdev: radeon_device pointer
1582 * @offset: byte offset into the aperture 1582 * @index: doorbell index
1583 * @v: value to write 1583 * @v: value to write
1584 * 1584 *
1585 * Writes @v to the doorbell aperture at the 1585 * Writes @v to the doorbell aperture at the
1586 * requested offset (CIK). 1586 * requested doorbell index (CIK).
1587 */ 1587 */
1588void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v) 1588void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v)
1589{ 1589{
1590 if (offset < rdev->doorbell.size) { 1590 if (index < rdev->doorbell.num_doorbells) {
1591 writel(v, ((void __iomem *)rdev->doorbell.ptr) + offset); 1591 writel(v, rdev->doorbell.ptr + index);
1592 } else { 1592 } else {
1593 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", offset); 1593 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
1594 } 1594 }
1595} 1595}
1596 1596
@@ -2427,6 +2427,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
2427 gb_tile_moden = 0; 2427 gb_tile_moden = 0;
2428 break; 2428 break;
2429 } 2429 }
2430 rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
2430 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); 2431 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2431 } 2432 }
2432 } else if (num_pipe_configs == 4) { 2433 } else if (num_pipe_configs == 4) {
@@ -2773,6 +2774,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
2773 gb_tile_moden = 0; 2774 gb_tile_moden = 0;
2774 break; 2775 break;
2775 } 2776 }
2777 rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
2776 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); 2778 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2777 } 2779 }
2778 } else if (num_pipe_configs == 2) { 2780 } else if (num_pipe_configs == 2) {
@@ -2990,6 +2992,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
2990 gb_tile_moden = 0; 2992 gb_tile_moden = 0;
2991 break; 2993 break;
2992 } 2994 }
2995 rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
2993 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); 2996 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2994 } 2997 }
2995 } else 2998 } else
@@ -3556,17 +3559,24 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
3556 radeon_ring_write(ring, 0); 3559 radeon_ring_write(ring, 0);
3557} 3560}
3558 3561
3559void cik_semaphore_ring_emit(struct radeon_device *rdev, 3562bool cik_semaphore_ring_emit(struct radeon_device *rdev,
3560 struct radeon_ring *ring, 3563 struct radeon_ring *ring,
3561 struct radeon_semaphore *semaphore, 3564 struct radeon_semaphore *semaphore,
3562 bool emit_wait) 3565 bool emit_wait)
3563{ 3566{
3567/* TODO: figure out why semaphore cause lockups */
3568#if 0
3564 uint64_t addr = semaphore->gpu_addr; 3569 uint64_t addr = semaphore->gpu_addr;
3565 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; 3570 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
3566 3571
3567 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); 3572 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
3568 radeon_ring_write(ring, addr & 0xffffffff); 3573 radeon_ring_write(ring, addr & 0xffffffff);
3569 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); 3574 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
3575
3576 return true;
3577#else
3578 return false;
3579#endif
3570} 3580}
3571 3581
3572/** 3582/**
@@ -3609,13 +3619,8 @@ int cik_copy_cpdma(struct radeon_device *rdev,
3609 return r; 3619 return r;
3610 } 3620 }
3611 3621
3612 if (radeon_fence_need_sync(*fence, ring->idx)) { 3622 radeon_semaphore_sync_to(sem, *fence);
3613 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, 3623 radeon_semaphore_sync_rings(rdev, sem, ring->idx);
3614 ring->idx);
3615 radeon_fence_note_sync(*fence, ring->idx);
3616 } else {
3617 radeon_semaphore_free(rdev, &sem, NULL);
3618 }
3619 3624
3620 for (i = 0; i < num_loops; i++) { 3625 for (i = 0; i < num_loops; i++) {
3621 cur_size_in_bytes = size_in_bytes; 3626 cur_size_in_bytes = size_in_bytes;
@@ -4052,7 +4057,7 @@ void cik_compute_ring_set_wptr(struct radeon_device *rdev,
4052 struct radeon_ring *ring) 4057 struct radeon_ring *ring)
4053{ 4058{
4054 rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr); 4059 rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
4055 WDOORBELL32(ring->doorbell_offset, ring->wptr); 4060 WDOORBELL32(ring->doorbell_index, ring->wptr);
4056} 4061}
4057 4062
4058/** 4063/**
@@ -4393,10 +4398,6 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
4393 return r; 4398 return r;
4394 } 4399 }
4395 4400
4396 /* doorbell offset */
4397 rdev->ring[idx].doorbell_offset =
4398 (rdev->ring[idx].doorbell_page_num * PAGE_SIZE) + 0;
4399
4400 /* init the mqd struct */ 4401 /* init the mqd struct */
4401 memset(buf, 0, sizeof(struct bonaire_mqd)); 4402 memset(buf, 0, sizeof(struct bonaire_mqd));
4402 4403
@@ -4508,7 +4509,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
4508 RREG32(CP_HQD_PQ_DOORBELL_CONTROL); 4509 RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
4509 mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK; 4510 mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK;
4510 mqd->queue_state.cp_hqd_pq_doorbell_control |= 4511 mqd->queue_state.cp_hqd_pq_doorbell_control |=
4511 DOORBELL_OFFSET(rdev->ring[idx].doorbell_offset / 4); 4512 DOORBELL_OFFSET(rdev->ring[idx].doorbell_index);
4512 mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN; 4513 mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
4513 mqd->queue_state.cp_hqd_pq_doorbell_control &= 4514 mqd->queue_state.cp_hqd_pq_doorbell_control &=
4514 ~(DOORBELL_SOURCE | DOORBELL_HIT); 4515 ~(DOORBELL_SOURCE | DOORBELL_HIT);
@@ -7839,14 +7840,14 @@ int cik_init(struct radeon_device *rdev)
7839 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; 7840 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
7840 ring->ring_obj = NULL; 7841 ring->ring_obj = NULL;
7841 r600_ring_init(rdev, ring, 1024 * 1024); 7842 r600_ring_init(rdev, ring, 1024 * 1024);
7842 r = radeon_doorbell_get(rdev, &ring->doorbell_page_num); 7843 r = radeon_doorbell_get(rdev, &ring->doorbell_index);
7843 if (r) 7844 if (r)
7844 return r; 7845 return r;
7845 7846
7846 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; 7847 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
7847 ring->ring_obj = NULL; 7848 ring->ring_obj = NULL;
7848 r600_ring_init(rdev, ring, 1024 * 1024); 7849 r600_ring_init(rdev, ring, 1024 * 1024);
7849 r = radeon_doorbell_get(rdev, &ring->doorbell_page_num); 7850 r = radeon_doorbell_get(rdev, &ring->doorbell_index);
7850 if (r) 7851 if (r)
7851 return r; 7852 return r;
7852 7853
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 9c9529de20ee..0300727a4f70 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -130,7 +130,7 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
130 * Add a DMA semaphore packet to the ring wait on or signal 130 * Add a DMA semaphore packet to the ring wait on or signal
131 * other rings (CIK). 131 * other rings (CIK).
132 */ 132 */
133void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, 133bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
134 struct radeon_ring *ring, 134 struct radeon_ring *ring,
135 struct radeon_semaphore *semaphore, 135 struct radeon_semaphore *semaphore,
136 bool emit_wait) 136 bool emit_wait)
@@ -141,6 +141,8 @@ void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
141 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits)); 141 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
142 radeon_ring_write(ring, addr & 0xfffffff8); 142 radeon_ring_write(ring, addr & 0xfffffff8);
143 radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff); 143 radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
144
145 return true;
144} 146}
145 147
146/** 148/**
@@ -443,13 +445,8 @@ int cik_copy_dma(struct radeon_device *rdev,
443 return r; 445 return r;
444 } 446 }
445 447
446 if (radeon_fence_need_sync(*fence, ring->idx)) { 448 radeon_semaphore_sync_to(sem, *fence);
447 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, 449 radeon_semaphore_sync_rings(rdev, sem, ring->idx);
448 ring->idx);
449 radeon_fence_note_sync(*fence, ring->idx);
450 } else {
451 radeon_semaphore_free(rdev, &sem, NULL);
452 }
453 450
454 for (i = 0; i < num_loops; i++) { 451 for (i = 0; i < num_loops; i++) {
455 cur_size_in_bytes = size_in_bytes; 452 cur_size_in_bytes = size_in_bytes;
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 91bb470de0a3..920e1e4a52c5 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -299,7 +299,9 @@ void cypress_program_response_times(struct radeon_device *rdev)
299static int cypress_pcie_performance_request(struct radeon_device *rdev, 299static int cypress_pcie_performance_request(struct radeon_device *rdev,
300 u8 perf_req, bool advertise) 300 u8 perf_req, bool advertise)
301{ 301{
302#if defined(CONFIG_ACPI)
302 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); 303 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
304#endif
303 u32 tmp; 305 u32 tmp;
304 306
305 udelay(10); 307 udelay(10);
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 009f46e0ce72..de86493cbc44 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -93,11 +93,13 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
93 struct radeon_device *rdev = encoder->dev->dev_private; 93 struct radeon_device *rdev = encoder->dev->dev_private;
94 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 94 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
95 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 95 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
96 u32 offset = dig->afmt->offset; 96 u32 offset;
97 97
98 if (!dig->afmt->pin) 98 if (!dig || !dig->afmt || !dig->afmt->pin)
99 return; 99 return;
100 100
101 offset = dig->afmt->offset;
102
101 WREG32(AFMT_AUDIO_SRC_CONTROL + offset, 103 WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
102 AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id)); 104 AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
103} 105}
@@ -112,7 +114,7 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
112 struct radeon_connector *radeon_connector = NULL; 114 struct radeon_connector *radeon_connector = NULL;
113 u32 tmp = 0, offset; 115 u32 tmp = 0, offset;
114 116
115 if (!dig->afmt->pin) 117 if (!dig || !dig->afmt || !dig->afmt->pin)
116 return; 118 return;
117 119
118 offset = dig->afmt->pin->offset; 120 offset = dig->afmt->pin->offset;
@@ -156,7 +158,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
156 u8 *sadb; 158 u8 *sadb;
157 int sad_count; 159 int sad_count;
158 160
159 if (!dig->afmt->pin) 161 if (!dig || !dig->afmt || !dig->afmt->pin)
160 return; 162 return;
161 163
162 offset = dig->afmt->pin->offset; 164 offset = dig->afmt->pin->offset;
@@ -217,7 +219,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
217 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, 219 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
218 }; 220 };
219 221
220 if (!dig->afmt->pin) 222 if (!dig || !dig->afmt || !dig->afmt->pin)
221 return; 223 return;
222 224
223 offset = dig->afmt->pin->offset; 225 offset = dig->afmt->pin->offset;
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 6a0656d00ed0..a37b54436382 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -131,13 +131,8 @@ int evergreen_copy_dma(struct radeon_device *rdev,
131 return r; 131 return r;
132 } 132 }
133 133
134 if (radeon_fence_need_sync(*fence, ring->idx)) { 134 radeon_semaphore_sync_to(sem, *fence);
135 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, 135 radeon_semaphore_sync_rings(rdev, sem, ring->idx);
136 ring->idx);
137 radeon_fence_note_sync(*fence, ring->idx);
138 } else {
139 radeon_semaphore_free(rdev, &sem, NULL);
140 }
141 136
142 for (i = 0; i < num_loops; i++) { 137 for (i = 0; i < num_loops; i++) {
143 cur_size_in_dw = size_in_dw; 138 cur_size_in_dw = size_in_dw;
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index f26339028154..49c4d48f54d6 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -785,8 +785,8 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
785 struct ni_ps *ps = ni_get_ps(rps); 785 struct ni_ps *ps = ni_get_ps(rps);
786 struct radeon_clock_and_voltage_limits *max_limits; 786 struct radeon_clock_and_voltage_limits *max_limits;
787 bool disable_mclk_switching; 787 bool disable_mclk_switching;
788 u32 mclk, sclk; 788 u32 mclk;
789 u16 vddc, vddci; 789 u16 vddci;
790 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; 790 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
791 int i; 791 int i;
792 792
@@ -839,24 +839,14 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
839 839
840 /* XXX validate the min clocks required for display */ 840 /* XXX validate the min clocks required for display */
841 841
842 /* adjust low state */
842 if (disable_mclk_switching) { 843 if (disable_mclk_switching) {
843 mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; 844 ps->performance_levels[0].mclk =
844 sclk = ps->performance_levels[0].sclk; 845 ps->performance_levels[ps->performance_level_count - 1].mclk;
845 vddc = ps->performance_levels[0].vddc; 846 ps->performance_levels[0].vddci =
846 vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; 847 ps->performance_levels[ps->performance_level_count - 1].vddci;
847 } else {
848 sclk = ps->performance_levels[0].sclk;
849 mclk = ps->performance_levels[0].mclk;
850 vddc = ps->performance_levels[0].vddc;
851 vddci = ps->performance_levels[0].vddci;
852 } 848 }
853 849
854 /* adjusted low state */
855 ps->performance_levels[0].sclk = sclk;
856 ps->performance_levels[0].mclk = mclk;
857 ps->performance_levels[0].vddc = vddc;
858 ps->performance_levels[0].vddci = vddci;
859
860 btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk, 850 btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
861 &ps->performance_levels[0].sclk, 851 &ps->performance_levels[0].sclk,
862 &ps->performance_levels[0].mclk); 852 &ps->performance_levels[0].mclk);
@@ -868,11 +858,15 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
868 ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; 858 ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
869 } 859 }
870 860
861 /* adjust remaining states */
871 if (disable_mclk_switching) { 862 if (disable_mclk_switching) {
872 mclk = ps->performance_levels[0].mclk; 863 mclk = ps->performance_levels[0].mclk;
864 vddci = ps->performance_levels[0].vddci;
873 for (i = 1; i < ps->performance_level_count; i++) { 865 for (i = 1; i < ps->performance_level_count; i++) {
874 if (mclk < ps->performance_levels[i].mclk) 866 if (mclk < ps->performance_levels[i].mclk)
875 mclk = ps->performance_levels[i].mclk; 867 mclk = ps->performance_levels[i].mclk;
868 if (vddci < ps->performance_levels[i].vddci)
869 vddci = ps->performance_levels[i].vddci;
876 } 870 }
877 for (i = 0; i < ps->performance_level_count; i++) { 871 for (i = 0; i < ps->performance_level_count; i++) {
878 ps->performance_levels[i].mclk = mclk; 872 ps->performance_levels[i].mclk = mclk;
@@ -3445,9 +3439,9 @@ static int ni_enable_smc_cac(struct radeon_device *rdev,
3445static int ni_pcie_performance_request(struct radeon_device *rdev, 3439static int ni_pcie_performance_request(struct radeon_device *rdev,
3446 u8 perf_req, bool advertise) 3440 u8 perf_req, bool advertise)
3447{ 3441{
3442#if defined(CONFIG_ACPI)
3448 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); 3443 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3449 3444
3450#if defined(CONFIG_ACPI)
3451 if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) || 3445 if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
3452 (perf_req == PCIE_PERF_REQ_PECI_GEN2)) { 3446 (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
3453 if (eg_pi->pcie_performance_request_registered == false) 3447 if (eg_pi->pcie_performance_request_registered == false)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 784983d78158..10abc4d5a6cc 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -869,13 +869,14 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
869 radeon_ring_write(ring, RADEON_SW_INT_FIRE); 869 radeon_ring_write(ring, RADEON_SW_INT_FIRE);
870} 870}
871 871
872void r100_semaphore_ring_emit(struct radeon_device *rdev, 872bool r100_semaphore_ring_emit(struct radeon_device *rdev,
873 struct radeon_ring *ring, 873 struct radeon_ring *ring,
874 struct radeon_semaphore *semaphore, 874 struct radeon_semaphore *semaphore,
875 bool emit_wait) 875 bool emit_wait)
876{ 876{
877 /* Unused on older asics, since we don't have semaphores or multiple rings */ 877 /* Unused on older asics, since we don't have semaphores or multiple rings */
878 BUG(); 878 BUG();
879 return false;
879} 880}
880 881
881int r100_copy_blit(struct radeon_device *rdev, 882int r100_copy_blit(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 4e609e8a8d2b..9ad06732a78b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2650,7 +2650,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
2650 } 2650 }
2651} 2651}
2652 2652
2653void r600_semaphore_ring_emit(struct radeon_device *rdev, 2653bool r600_semaphore_ring_emit(struct radeon_device *rdev,
2654 struct radeon_ring *ring, 2654 struct radeon_ring *ring,
2655 struct radeon_semaphore *semaphore, 2655 struct radeon_semaphore *semaphore,
2656 bool emit_wait) 2656 bool emit_wait)
@@ -2664,6 +2664,8 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
2664 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); 2664 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
2665 radeon_ring_write(ring, addr & 0xffffffff); 2665 radeon_ring_write(ring, addr & 0xffffffff);
2666 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); 2666 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
2667
2668 return true;
2667} 2669}
2668 2670
2669/** 2671/**
@@ -2706,13 +2708,8 @@ int r600_copy_cpdma(struct radeon_device *rdev,
2706 return r; 2708 return r;
2707 } 2709 }
2708 2710
2709 if (radeon_fence_need_sync(*fence, ring->idx)) { 2711 radeon_semaphore_sync_to(sem, *fence);
2710 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, 2712 radeon_semaphore_sync_rings(rdev, sem, ring->idx);
2711 ring->idx);
2712 radeon_fence_note_sync(*fence, ring->idx);
2713 } else {
2714 radeon_semaphore_free(rdev, &sem, NULL);
2715 }
2716 2713
2717 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2714 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2718 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 2715 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 3b317456512a..7844d15c139f 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -311,7 +311,7 @@ void r600_dma_fence_ring_emit(struct radeon_device *rdev,
311 * Add a DMA semaphore packet to the ring wait on or signal 311 * Add a DMA semaphore packet to the ring wait on or signal
312 * other rings (r6xx-SI). 312 * other rings (r6xx-SI).
313 */ 313 */
314void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, 314bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
315 struct radeon_ring *ring, 315 struct radeon_ring *ring,
316 struct radeon_semaphore *semaphore, 316 struct radeon_semaphore *semaphore,
317 bool emit_wait) 317 bool emit_wait)
@@ -322,6 +322,8 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
322 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0)); 322 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
323 radeon_ring_write(ring, addr & 0xfffffffc); 323 radeon_ring_write(ring, addr & 0xfffffffc);
324 radeon_ring_write(ring, upper_32_bits(addr) & 0xff); 324 radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
325
326 return true;
325} 327}
326 328
327/** 329/**
@@ -462,13 +464,8 @@ int r600_copy_dma(struct radeon_device *rdev,
462 return r; 464 return r;
463 } 465 }
464 466
465 if (radeon_fence_need_sync(*fence, ring->idx)) { 467 radeon_semaphore_sync_to(sem, *fence);
466 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, 468 radeon_semaphore_sync_rings(rdev, sem, ring->idx);
467 ring->idx);
468 radeon_fence_note_sync(*fence, ring->idx);
469 } else {
470 radeon_semaphore_free(rdev, &sem, NULL);
471 }
472 469
473 for (i = 0; i < num_loops; i++) { 470 for (i = 0; i < num_loops; i++) {
474 cur_size_in_dw = size_in_dw; 471 cur_size_in_dw = size_in_dw;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 4b89262f3f0e..b7d3ecba43e3 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -304,9 +304,9 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
304 WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); 304 WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
305 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ 305 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
306 } 306 }
307 } else if (ASIC_IS_DCE3(rdev)) { 307 } else {
308 /* according to the reg specs, this should DCE3.2 only, but in 308 /* according to the reg specs, this should DCE3.2 only, but in
309 * practice it seems to cover DCE3.0/3.1 as well. 309 * practice it seems to cover DCE2.0/3.0/3.1 as well.
310 */ 310 */
311 if (dig->dig_encoder == 0) { 311 if (dig->dig_encoder == 0) {
312 WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); 312 WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
@@ -317,10 +317,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
317 WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100); 317 WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
318 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ 318 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
319 } 319 }
320 } else {
321 /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
322 WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
323 AUDIO_DTO_MODULE(clock / 10));
324 } 320 }
325} 321}
326 322
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b9ee99258602..b1f990d0eaa1 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -348,6 +348,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, i
348void radeon_fence_process(struct radeon_device *rdev, int ring); 348void radeon_fence_process(struct radeon_device *rdev, int ring);
349bool radeon_fence_signaled(struct radeon_fence *fence); 349bool radeon_fence_signaled(struct radeon_fence *fence);
350int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); 350int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
351int radeon_fence_wait_locked(struct radeon_fence *fence);
351int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring); 352int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
352int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring); 353int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
353int radeon_fence_wait_any(struct radeon_device *rdev, 354int radeon_fence_wait_any(struct radeon_device *rdev,
@@ -548,17 +549,20 @@ struct radeon_semaphore {
548 struct radeon_sa_bo *sa_bo; 549 struct radeon_sa_bo *sa_bo;
549 signed waiters; 550 signed waiters;
550 uint64_t gpu_addr; 551 uint64_t gpu_addr;
552 struct radeon_fence *sync_to[RADEON_NUM_RINGS];
551}; 553};
552 554
553int radeon_semaphore_create(struct radeon_device *rdev, 555int radeon_semaphore_create(struct radeon_device *rdev,
554 struct radeon_semaphore **semaphore); 556 struct radeon_semaphore **semaphore);
555void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, 557bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
556 struct radeon_semaphore *semaphore); 558 struct radeon_semaphore *semaphore);
557void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, 559bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
558 struct radeon_semaphore *semaphore); 560 struct radeon_semaphore *semaphore);
561void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
562 struct radeon_fence *fence);
559int radeon_semaphore_sync_rings(struct radeon_device *rdev, 563int radeon_semaphore_sync_rings(struct radeon_device *rdev,
560 struct radeon_semaphore *semaphore, 564 struct radeon_semaphore *semaphore,
561 int signaler, int waiter); 565 int waiting_ring);
562void radeon_semaphore_free(struct radeon_device *rdev, 566void radeon_semaphore_free(struct radeon_device *rdev,
563 struct radeon_semaphore **semaphore, 567 struct radeon_semaphore **semaphore,
564 struct radeon_fence *fence); 568 struct radeon_fence *fence);
@@ -645,13 +649,15 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
645/* 649/*
646 * GPU doorbell structures, functions & helpers 650 * GPU doorbell structures, functions & helpers
647 */ 651 */
652#define RADEON_MAX_DOORBELLS 1024 /* Reserve at most 1024 doorbell slots for radeon-owned rings. */
653
648struct radeon_doorbell { 654struct radeon_doorbell {
649 u32 num_pages;
650 bool free[1024];
651 /* doorbell mmio */ 655 /* doorbell mmio */
652 resource_size_t base; 656 resource_size_t base;
653 resource_size_t size; 657 resource_size_t size;
654 void __iomem *ptr; 658 u32 __iomem *ptr;
659 u32 num_doorbells; /* Number of doorbells actually reserved for radeon. */
660 unsigned long used[DIV_ROUND_UP(RADEON_MAX_DOORBELLS, BITS_PER_LONG)];
655}; 661};
656 662
657int radeon_doorbell_get(struct radeon_device *rdev, u32 *page); 663int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
@@ -765,7 +771,6 @@ struct radeon_ib {
765 struct radeon_fence *fence; 771 struct radeon_fence *fence;
766 struct radeon_vm *vm; 772 struct radeon_vm *vm;
767 bool is_const_ib; 773 bool is_const_ib;
768 struct radeon_fence *sync_to[RADEON_NUM_RINGS];
769 struct radeon_semaphore *semaphore; 774 struct radeon_semaphore *semaphore;
770}; 775};
771 776
@@ -799,8 +804,7 @@ struct radeon_ring {
799 u32 pipe; 804 u32 pipe;
800 u32 queue; 805 u32 queue;
801 struct radeon_bo *mqd_obj; 806 struct radeon_bo *mqd_obj;
802 u32 doorbell_page_num; 807 u32 doorbell_index;
803 u32 doorbell_offset;
804 unsigned wptr_offs; 808 unsigned wptr_offs;
805}; 809};
806 810
@@ -921,7 +925,6 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
921 struct radeon_ib *ib, struct radeon_vm *vm, 925 struct radeon_ib *ib, struct radeon_vm *vm,
922 unsigned size); 926 unsigned size);
923void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib); 927void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
924void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence);
925int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, 928int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
926 struct radeon_ib *const_ib); 929 struct radeon_ib *const_ib);
927int radeon_ib_pool_init(struct radeon_device *rdev); 930int radeon_ib_pool_init(struct radeon_device *rdev);
@@ -1638,7 +1641,7 @@ struct radeon_asic_ring {
1638 /* command emmit functions */ 1641 /* command emmit functions */
1639 void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); 1642 void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
1640 void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence); 1643 void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
1641 void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp, 1644 bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
1642 struct radeon_semaphore *semaphore, bool emit_wait); 1645 struct radeon_semaphore *semaphore, bool emit_wait);
1643 void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 1646 void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
1644 1647
@@ -1979,6 +1982,7 @@ struct cik_asic {
1979 1982
1980 unsigned tile_config; 1983 unsigned tile_config;
1981 uint32_t tile_mode_array[32]; 1984 uint32_t tile_mode_array[32];
1985 uint32_t macrotile_mode_array[16];
1982}; 1986};
1983 1987
1984union radeon_asic_config { 1988union radeon_asic_config {
@@ -2239,8 +2243,8 @@ void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
2239u32 r100_io_rreg(struct radeon_device *rdev, u32 reg); 2243u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
2240void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); 2244void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2241 2245
2242u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset); 2246u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index);
2243void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v); 2247void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
2244 2248
2245/* 2249/*
2246 * Cast helper 2250 * Cast helper
@@ -2303,8 +2307,8 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
2303#define RREG32_IO(reg) r100_io_rreg(rdev, (reg)) 2307#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
2304#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v)) 2308#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
2305 2309
2306#define RDOORBELL32(offset) cik_mm_rdoorbell(rdev, (offset)) 2310#define RDOORBELL32(index) cik_mm_rdoorbell(rdev, (index))
2307#define WDOORBELL32(offset, v) cik_mm_wdoorbell(rdev, (offset), (v)) 2311#define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v))
2308 2312
2309/* 2313/*
2310 * Indirect registers accessor 2314 * Indirect registers accessor
@@ -2706,10 +2710,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
2706 struct radeon_vm *vm, 2710 struct radeon_vm *vm,
2707 struct radeon_fence *fence); 2711 struct radeon_fence *fence);
2708uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr); 2712uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
2709int radeon_vm_bo_update_pte(struct radeon_device *rdev, 2713int radeon_vm_bo_update(struct radeon_device *rdev,
2710 struct radeon_vm *vm, 2714 struct radeon_vm *vm,
2711 struct radeon_bo *bo, 2715 struct radeon_bo *bo,
2712 struct ttm_mem_reg *mem); 2716 struct ttm_mem_reg *mem);
2713void radeon_vm_bo_invalidate(struct radeon_device *rdev, 2717void radeon_vm_bo_invalidate(struct radeon_device *rdev,
2714 struct radeon_bo *bo); 2718 struct radeon_bo *bo);
2715struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, 2719struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 10f98c7742d8..98a9074b306b 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -369,7 +369,7 @@ int radeon_atif_handler(struct radeon_device *rdev,
369 return NOTIFY_DONE; 369 return NOTIFY_DONE;
370 370
371 /* Check pending SBIOS requests */ 371 /* Check pending SBIOS requests */
372 handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); 372 handle = ACPI_HANDLE(&rdev->pdev->dev);
373 count = radeon_atif_get_sbios_requests(handle, &req); 373 count = radeon_atif_get_sbios_requests(handle, &req);
374 374
375 if (count <= 0) 375 if (count <= 0)
@@ -556,7 +556,7 @@ int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev)
556 struct radeon_atcs *atcs = &rdev->atcs; 556 struct radeon_atcs *atcs = &rdev->atcs;
557 557
558 /* Get the device handle */ 558 /* Get the device handle */
559 handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); 559 handle = ACPI_HANDLE(&rdev->pdev->dev);
560 if (!handle) 560 if (!handle)
561 return -EINVAL; 561 return -EINVAL;
562 562
@@ -596,7 +596,7 @@ int radeon_acpi_pcie_performance_request(struct radeon_device *rdev,
596 u32 retry = 3; 596 u32 retry = 3;
597 597
598 /* Get the device handle */ 598 /* Get the device handle */
599 handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); 599 handle = ACPI_HANDLE(&rdev->pdev->dev);
600 if (!handle) 600 if (!handle)
601 return -EINVAL; 601 return -EINVAL;
602 602
@@ -699,7 +699,7 @@ int radeon_acpi_init(struct radeon_device *rdev)
699 int ret; 699 int ret;
700 700
701 /* Get the device handle */ 701 /* Get the device handle */
702 handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); 702 handle = ACPI_HANDLE(&rdev->pdev->dev);
703 703
704 /* No need to proceed if we're sure that ATIF is not supported */ 704 /* No need to proceed if we're sure that ATIF is not supported */
705 if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle) 705 if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 50853c0cb49d..e354ce94cdd1 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2015,6 +2015,8 @@ static struct radeon_asic ci_asic = {
2015 .bandwidth_update = &dce8_bandwidth_update, 2015 .bandwidth_update = &dce8_bandwidth_update,
2016 .get_vblank_counter = &evergreen_get_vblank_counter, 2016 .get_vblank_counter = &evergreen_get_vblank_counter,
2017 .wait_for_vblank = &dce4_wait_for_vblank, 2017 .wait_for_vblank = &dce4_wait_for_vblank,
2018 .set_backlight_level = &atombios_set_backlight_level,
2019 .get_backlight_level = &atombios_get_backlight_level,
2018 .hdmi_enable = &evergreen_hdmi_enable, 2020 .hdmi_enable = &evergreen_hdmi_enable,
2019 .hdmi_setmode = &evergreen_hdmi_setmode, 2021 .hdmi_setmode = &evergreen_hdmi_setmode,
2020 }, 2022 },
@@ -2114,6 +2116,8 @@ static struct radeon_asic kv_asic = {
2114 .bandwidth_update = &dce8_bandwidth_update, 2116 .bandwidth_update = &dce8_bandwidth_update,
2115 .get_vblank_counter = &evergreen_get_vblank_counter, 2117 .get_vblank_counter = &evergreen_get_vblank_counter,
2116 .wait_for_vblank = &dce4_wait_for_vblank, 2118 .wait_for_vblank = &dce4_wait_for_vblank,
2119 .set_backlight_level = &atombios_set_backlight_level,
2120 .get_backlight_level = &atombios_get_backlight_level,
2117 .hdmi_enable = &evergreen_hdmi_enable, 2121 .hdmi_enable = &evergreen_hdmi_enable,
2118 .hdmi_setmode = &evergreen_hdmi_setmode, 2122 .hdmi_setmode = &evergreen_hdmi_setmode,
2119 }, 2123 },
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index f2833ee3a613..c9fd97b58076 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -80,7 +80,7 @@ int r100_irq_set(struct radeon_device *rdev);
80int r100_irq_process(struct radeon_device *rdev); 80int r100_irq_process(struct radeon_device *rdev);
81void r100_fence_ring_emit(struct radeon_device *rdev, 81void r100_fence_ring_emit(struct radeon_device *rdev,
82 struct radeon_fence *fence); 82 struct radeon_fence *fence);
83void r100_semaphore_ring_emit(struct radeon_device *rdev, 83bool r100_semaphore_ring_emit(struct radeon_device *rdev,
84 struct radeon_ring *cp, 84 struct radeon_ring *cp,
85 struct radeon_semaphore *semaphore, 85 struct radeon_semaphore *semaphore,
86 bool emit_wait); 86 bool emit_wait);
@@ -313,13 +313,13 @@ int r600_cs_parse(struct radeon_cs_parser *p);
313int r600_dma_cs_parse(struct radeon_cs_parser *p); 313int r600_dma_cs_parse(struct radeon_cs_parser *p);
314void r600_fence_ring_emit(struct radeon_device *rdev, 314void r600_fence_ring_emit(struct radeon_device *rdev,
315 struct radeon_fence *fence); 315 struct radeon_fence *fence);
316void r600_semaphore_ring_emit(struct radeon_device *rdev, 316bool r600_semaphore_ring_emit(struct radeon_device *rdev,
317 struct radeon_ring *cp, 317 struct radeon_ring *cp,
318 struct radeon_semaphore *semaphore, 318 struct radeon_semaphore *semaphore,
319 bool emit_wait); 319 bool emit_wait);
320void r600_dma_fence_ring_emit(struct radeon_device *rdev, 320void r600_dma_fence_ring_emit(struct radeon_device *rdev,
321 struct radeon_fence *fence); 321 struct radeon_fence *fence);
322void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, 322bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
323 struct radeon_ring *ring, 323 struct radeon_ring *ring,
324 struct radeon_semaphore *semaphore, 324 struct radeon_semaphore *semaphore,
325 bool emit_wait); 325 bool emit_wait);
@@ -566,10 +566,6 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev,
566 */ 566 */
567void cayman_fence_ring_emit(struct radeon_device *rdev, 567void cayman_fence_ring_emit(struct radeon_device *rdev,
568 struct radeon_fence *fence); 568 struct radeon_fence *fence);
569void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
570 struct radeon_ring *ring,
571 struct radeon_semaphore *semaphore,
572 bool emit_wait);
573void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev); 569void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
574int cayman_init(struct radeon_device *rdev); 570int cayman_init(struct radeon_device *rdev);
575void cayman_fini(struct radeon_device *rdev); 571void cayman_fini(struct radeon_device *rdev);
@@ -697,7 +693,7 @@ void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
697int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); 693int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
698void cik_sdma_fence_ring_emit(struct radeon_device *rdev, 694void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
699 struct radeon_fence *fence); 695 struct radeon_fence *fence);
700void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, 696bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
701 struct radeon_ring *ring, 697 struct radeon_ring *ring,
702 struct radeon_semaphore *semaphore, 698 struct radeon_semaphore *semaphore,
703 bool emit_wait); 699 bool emit_wait);
@@ -717,7 +713,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
717 struct radeon_fence *fence); 713 struct radeon_fence *fence);
718void cik_fence_compute_ring_emit(struct radeon_device *rdev, 714void cik_fence_compute_ring_emit(struct radeon_device *rdev,
719 struct radeon_fence *fence); 715 struct radeon_fence *fence);
720void cik_semaphore_ring_emit(struct radeon_device *rdev, 716bool cik_semaphore_ring_emit(struct radeon_device *rdev,
721 struct radeon_ring *cp, 717 struct radeon_ring *cp,
722 struct radeon_semaphore *semaphore, 718 struct radeon_semaphore *semaphore,
723 bool emit_wait); 719 bool emit_wait);
@@ -807,7 +803,7 @@ void uvd_v1_0_stop(struct radeon_device *rdev);
807 803
808int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); 804int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
809int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); 805int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
810void uvd_v1_0_semaphore_emit(struct radeon_device *rdev, 806bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
811 struct radeon_ring *ring, 807 struct radeon_ring *ring,
812 struct radeon_semaphore *semaphore, 808 struct radeon_semaphore *semaphore,
813 bool emit_wait); 809 bool emit_wait);
@@ -819,7 +815,7 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
819 struct radeon_fence *fence); 815 struct radeon_fence *fence);
820 816
821/* uvd v3.1 */ 817/* uvd v3.1 */
822void uvd_v3_1_semaphore_emit(struct radeon_device *rdev, 818bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
823 struct radeon_ring *ring, 819 struct radeon_ring *ring,
824 struct radeon_semaphore *semaphore, 820 struct radeon_semaphore *semaphore,
825 bool emit_wait); 821 bool emit_wait);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f79ee184ffd5..5c39bf7c3d88 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2918,7 +2918,7 @@ int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
2918 mpll_param->dll_speed = args.ucDllSpeed; 2918 mpll_param->dll_speed = args.ucDllSpeed;
2919 mpll_param->bwcntl = args.ucBWCntl; 2919 mpll_param->bwcntl = args.ucBWCntl;
2920 mpll_param->vco_mode = 2920 mpll_param->vco_mode =
2921 (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK) ? 1 : 0; 2921 (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK);
2922 mpll_param->yclk_sel = 2922 mpll_param->yclk_sel =
2923 (args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0; 2923 (args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
2924 mpll_param->qdr = 2924 mpll_param->qdr =
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 6153ec18943a..9d302eaeea15 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -8,8 +8,7 @@
8 */ 8 */
9#include <linux/vga_switcheroo.h> 9#include <linux/vga_switcheroo.h>
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <acpi/acpi.h> 11#include <linux/acpi.h>
12#include <acpi/acpi_bus.h>
13#include <linux/pci.h> 12#include <linux/pci.h>
14 13
15#include "radeon_acpi.h" 14#include "radeon_acpi.h"
@@ -447,7 +446,7 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
447 acpi_handle dhandle, atpx_handle; 446 acpi_handle dhandle, atpx_handle;
448 acpi_status status; 447 acpi_status status;
449 448
450 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); 449 dhandle = ACPI_HANDLE(&pdev->dev);
451 if (!dhandle) 450 if (!dhandle)
452 return false; 451 return false;
453 452
@@ -493,7 +492,7 @@ static int radeon_atpx_init(void)
493 */ 492 */
494static int radeon_atpx_get_client_id(struct pci_dev *pdev) 493static int radeon_atpx_get_client_id(struct pci_dev *pdev)
495{ 494{
496 if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) 495 if (radeon_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
497 return VGA_SWITCHEROO_IGD; 496 return VGA_SWITCHEROO_IGD;
498 else 497 else
499 return VGA_SWITCHEROO_DIS; 498 return VGA_SWITCHEROO_DIS;
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index c155d6f3fa68..b3633d9a5317 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -185,7 +185,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
185 return false; 185 return false;
186 186
187 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { 187 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
188 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); 188 dhandle = ACPI_HANDLE(&pdev->dev);
189 if (!dhandle) 189 if (!dhandle)
190 continue; 190 continue;
191 191
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 26ca223d12d6..0b366169d64d 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -159,7 +159,8 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
159 if (!p->relocs[i].robj) 159 if (!p->relocs[i].robj)
160 continue; 160 continue;
161 161
162 radeon_ib_sync_to(&p->ib, p->relocs[i].robj->tbo.sync_obj); 162 radeon_semaphore_sync_to(p->ib.semaphore,
163 p->relocs[i].robj->tbo.sync_obj);
163 } 164 }
164} 165}
165 166
@@ -359,13 +360,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
359 struct radeon_bo *bo; 360 struct radeon_bo *bo;
360 int r; 361 int r;
361 362
362 r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem); 363 r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
363 if (r) { 364 if (r) {
364 return r; 365 return r;
365 } 366 }
366 list_for_each_entry(lobj, &parser->validated, tv.head) { 367 list_for_each_entry(lobj, &parser->validated, tv.head) {
367 bo = lobj->bo; 368 bo = lobj->bo;
368 r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem); 369 r = radeon_vm_bo_update(parser->rdev, vm, bo, &bo->tbo.mem);
369 if (r) { 370 if (r) {
370 return r; 371 return r;
371 } 372 }
@@ -411,9 +412,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
411 goto out; 412 goto out;
412 } 413 }
413 radeon_cs_sync_rings(parser); 414 radeon_cs_sync_rings(parser);
414 radeon_ib_sync_to(&parser->ib, vm->fence); 415 radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);
415 radeon_ib_sync_to(&parser->ib, radeon_vm_grab_id( 416 radeon_semaphore_sync_to(parser->ib.semaphore,
416 rdev, vm, parser->ring)); 417 radeon_vm_grab_id(rdev, vm, parser->ring));
417 418
418 if ((rdev->family >= CHIP_TAHITI) && 419 if ((rdev->family >= CHIP_TAHITI) &&
419 (parser->chunk_const_ib_idx != -1)) { 420 (parser->chunk_const_ib_idx != -1)) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b9234c43f43d..39b033b441d2 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -251,28 +251,23 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
251 */ 251 */
252int radeon_doorbell_init(struct radeon_device *rdev) 252int radeon_doorbell_init(struct radeon_device *rdev)
253{ 253{
254 int i;
255
256 /* doorbell bar mapping */ 254 /* doorbell bar mapping */
257 rdev->doorbell.base = pci_resource_start(rdev->pdev, 2); 255 rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
258 rdev->doorbell.size = pci_resource_len(rdev->pdev, 2); 256 rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
259 257
260 /* limit to 4 MB for now */ 258 rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
261 if (rdev->doorbell.size > (4 * 1024 * 1024)) 259 if (rdev->doorbell.num_doorbells == 0)
262 rdev->doorbell.size = 4 * 1024 * 1024; 260 return -EINVAL;
263 261
264 rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.size); 262 rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
265 if (rdev->doorbell.ptr == NULL) { 263 if (rdev->doorbell.ptr == NULL) {
266 return -ENOMEM; 264 return -ENOMEM;
267 } 265 }
268 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base); 266 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
269 DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size); 267 DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
270 268
271 rdev->doorbell.num_pages = rdev->doorbell.size / PAGE_SIZE; 269 memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
272 270
273 for (i = 0; i < rdev->doorbell.num_pages; i++) {
274 rdev->doorbell.free[i] = true;
275 }
276 return 0; 271 return 0;
277} 272}
278 273
@@ -290,40 +285,38 @@ void radeon_doorbell_fini(struct radeon_device *rdev)
290} 285}
291 286
292/** 287/**
293 * radeon_doorbell_get - Allocate a doorbell page 288 * radeon_doorbell_get - Allocate a doorbell entry
294 * 289 *
295 * @rdev: radeon_device pointer 290 * @rdev: radeon_device pointer
296 * @doorbell: doorbell page number 291 * @doorbell: doorbell index
297 * 292 *
298 * Allocate a doorbell page for use by the driver (all asics). 293 * Allocate a doorbell for use by the driver (all asics).
299 * Returns 0 on success or -EINVAL on failure. 294 * Returns 0 on success or -EINVAL on failure.
300 */ 295 */
301int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell) 296int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
302{ 297{
303 int i; 298 unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
304 299 if (offset < rdev->doorbell.num_doorbells) {
305 for (i = 0; i < rdev->doorbell.num_pages; i++) { 300 __set_bit(offset, rdev->doorbell.used);
306 if (rdev->doorbell.free[i]) { 301 *doorbell = offset;
307 rdev->doorbell.free[i] = false; 302 return 0;
308 *doorbell = i; 303 } else {
309 return 0; 304 return -EINVAL;
310 }
311 } 305 }
312 return -EINVAL;
313} 306}
314 307
315/** 308/**
316 * radeon_doorbell_free - Free a doorbell page 309 * radeon_doorbell_free - Free a doorbell entry
317 * 310 *
318 * @rdev: radeon_device pointer 311 * @rdev: radeon_device pointer
319 * @doorbell: doorbell page number 312 * @doorbell: doorbell index
320 * 313 *
321 * Free a doorbell page allocated for use by the driver (all asics) 314 * Free a doorbell allocated for use by the driver (all asics)
322 */ 315 */
323void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell) 316void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
324{ 317{
325 if (doorbell < rdev->doorbell.num_pages) 318 if (doorbell < rdev->doorbell.num_doorbells)
326 rdev->doorbell.free[doorbell] = true; 319 __clear_bit(doorbell, rdev->doorbell.used);
327} 320}
328 321
329/* 322/*
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 1aee32213f66..9f5ff28864f6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -76,9 +76,10 @@
76 * 2.32.0 - new info request for rings working 76 * 2.32.0 - new info request for rings working
77 * 2.33.0 - Add SI tiling mode array query 77 * 2.33.0 - Add SI tiling mode array query
78 * 2.34.0 - Add CIK tiling mode array query 78 * 2.34.0 - Add CIK tiling mode array query
79 * 2.35.0 - Add CIK macrotile mode array query
79 */ 80 */
80#define KMS_DRIVER_MAJOR 2 81#define KMS_DRIVER_MAJOR 2
81#define KMS_DRIVER_MINOR 34 82#define KMS_DRIVER_MINOR 35
82#define KMS_DRIVER_PATCHLEVEL 0 83#define KMS_DRIVER_PATCHLEVEL 0
83int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 84int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
84int radeon_driver_unload_kms(struct drm_device *dev); 85int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 543dcfae7e6f..00e0d449021c 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -108,9 +108,10 @@
108 * 1.31- Add support for num Z pipes from GET_PARAM 108 * 1.31- Add support for num Z pipes from GET_PARAM
109 * 1.32- fixes for rv740 setup 109 * 1.32- fixes for rv740 setup
110 * 1.33- Add r6xx/r7xx const buffer support 110 * 1.33- Add r6xx/r7xx const buffer support
111 * 1.34- fix evergreen/cayman GS register
111 */ 112 */
112#define DRIVER_MAJOR 1 113#define DRIVER_MAJOR 1
113#define DRIVER_MINOR 33 114#define DRIVER_MINOR 34
114#define DRIVER_PATCHLEVEL 0 115#define DRIVER_PATCHLEVEL 0
115 116
116long radeon_drm_ioctl(struct file *filp, 117long radeon_drm_ioctl(struct file *filp,
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 281d14c22a47..d3a86e43c012 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -472,6 +472,36 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
472} 472}
473 473
474/** 474/**
475 * radeon_fence_wait_locked - wait for a fence to signal
476 *
477 * @fence: radeon fence object
478 *
479 * Wait for the requested fence to signal (all asics).
480 * Returns 0 if the fence has passed, error for all other cases.
481 */
482int radeon_fence_wait_locked(struct radeon_fence *fence)
483{
484 uint64_t seq[RADEON_NUM_RINGS] = {};
485 int r;
486
487 if (fence == NULL) {
488 WARN(1, "Querying an invalid fence : %p !\n", fence);
489 return -EINVAL;
490 }
491
492 seq[fence->ring] = fence->seq;
493 if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
494 return 0;
495
496 r = radeon_fence_wait_seq(fence->rdev, seq, false, false);
497 if (r)
498 return r;
499
500 fence->seq = RADEON_FENCE_SIGNALED_SEQ;
501 return 0;
502}
503
504/**
475 * radeon_fence_wait_next_locked - wait for the next fence to signal 505 * radeon_fence_wait_next_locked - wait for the next fence to signal
476 * 506 *
477 * @rdev: radeon device pointer 507 * @rdev: radeon device pointer
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 8a83b89d4709..96e440061bdb 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -29,6 +29,7 @@
29#include <drm/radeon_drm.h> 29#include <drm/radeon_drm.h>
30#include "radeon.h" 30#include "radeon.h"
31#include "radeon_reg.h" 31#include "radeon_reg.h"
32#include "radeon_trace.h"
32 33
33/* 34/*
34 * GART 35 * GART
@@ -651,7 +652,7 @@ retry:
651 radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr, 652 radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
652 0, pd_entries, 0, 0); 653 0, pd_entries, 0, 0);
653 654
654 radeon_ib_sync_to(&ib, vm->fence); 655 radeon_semaphore_sync_to(ib.semaphore, vm->fence);
655 r = radeon_ib_schedule(rdev, &ib, NULL); 656 r = radeon_ib_schedule(rdev, &ib, NULL);
656 if (r) { 657 if (r) {
657 radeon_ib_free(rdev, &ib); 658 radeon_ib_free(rdev, &ib);
@@ -737,6 +738,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
737 for (i = 0; i < 2; ++i) { 738 for (i = 0; i < 2; ++i) {
738 if (choices[i]) { 739 if (choices[i]) {
739 vm->id = choices[i]; 740 vm->id = choices[i];
741 trace_radeon_vm_grab_id(vm->id, ring);
740 return rdev->vm_manager.active[choices[i]]; 742 return rdev->vm_manager.active[choices[i]];
741 } 743 }
742 } 744 }
@@ -1116,7 +1118,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
1116} 1118}
1117 1119
1118/** 1120/**
1119 * radeon_vm_bo_update_pte - map a bo into the vm page table 1121 * radeon_vm_bo_update - map a bo into the vm page table
1120 * 1122 *
1121 * @rdev: radeon_device pointer 1123 * @rdev: radeon_device pointer
1122 * @vm: requested vm 1124 * @vm: requested vm
@@ -1128,10 +1130,10 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
1128 * 1130 *
1129 * Object have to be reserved & global and local mutex must be locked! 1131 * Object have to be reserved & global and local mutex must be locked!
1130 */ 1132 */
1131int radeon_vm_bo_update_pte(struct radeon_device *rdev, 1133int radeon_vm_bo_update(struct radeon_device *rdev,
1132 struct radeon_vm *vm, 1134 struct radeon_vm *vm,
1133 struct radeon_bo *bo, 1135 struct radeon_bo *bo,
1134 struct ttm_mem_reg *mem) 1136 struct ttm_mem_reg *mem)
1135{ 1137{
1136 struct radeon_ib ib; 1138 struct radeon_ib ib;
1137 struct radeon_bo_va *bo_va; 1139 struct radeon_bo_va *bo_va;
@@ -1176,6 +1178,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1176 bo_va->valid = false; 1178 bo_va->valid = false;
1177 } 1179 }
1178 1180
1181 trace_radeon_vm_bo_update(bo_va);
1182
1179 nptes = radeon_bo_ngpu_pages(bo); 1183 nptes = radeon_bo_ngpu_pages(bo);
1180 1184
1181 /* assume two extra pdes in case the mapping overlaps the borders */ 1185 /* assume two extra pdes in case the mapping overlaps the borders */
@@ -1209,6 +1213,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1209 return -ENOMEM; 1213 return -ENOMEM;
1210 1214
1211 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4); 1215 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
1216 if (r)
1217 return r;
1212 ib.length_dw = 0; 1218 ib.length_dw = 0;
1213 1219
1214 r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset); 1220 r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
@@ -1220,7 +1226,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1220 radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset, 1226 radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
1221 addr, radeon_vm_page_flags(bo_va->flags)); 1227 addr, radeon_vm_page_flags(bo_va->flags));
1222 1228
1223 radeon_ib_sync_to(&ib, vm->fence); 1229 radeon_semaphore_sync_to(ib.semaphore, vm->fence);
1224 r = radeon_ib_schedule(rdev, &ib, NULL); 1230 r = radeon_ib_schedule(rdev, &ib, NULL);
1225 if (r) { 1231 if (r) {
1226 radeon_ib_free(rdev, &ib); 1232 radeon_ib_free(rdev, &ib);
@@ -1255,7 +1261,7 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
1255 mutex_lock(&rdev->vm_manager.lock); 1261 mutex_lock(&rdev->vm_manager.lock);
1256 mutex_lock(&bo_va->vm->mutex); 1262 mutex_lock(&bo_va->vm->mutex);
1257 if (bo_va->soffset) { 1263 if (bo_va->soffset) {
1258 r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); 1264 r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
1259 } 1265 }
1260 mutex_unlock(&rdev->vm_manager.lock); 1266 mutex_unlock(&rdev->vm_manager.lock);
1261 list_del(&bo_va->vm_list); 1267 list_del(&bo_va->vm_list);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index bb8710531a1b..55d0b474bd37 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -340,7 +340,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
340 break; 340 break;
341 case RADEON_INFO_BACKEND_MAP: 341 case RADEON_INFO_BACKEND_MAP:
342 if (rdev->family >= CHIP_BONAIRE) 342 if (rdev->family >= CHIP_BONAIRE)
343 return -EINVAL; 343 *value = rdev->config.cik.backend_map;
344 else if (rdev->family >= CHIP_TAHITI) 344 else if (rdev->family >= CHIP_TAHITI)
345 *value = rdev->config.si.backend_map; 345 *value = rdev->config.si.backend_map;
346 else if (rdev->family >= CHIP_CAYMAN) 346 else if (rdev->family >= CHIP_CAYMAN)
@@ -449,6 +449,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
449 return -EINVAL; 449 return -EINVAL;
450 } 450 }
451 break; 451 break;
452 case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
453 if (rdev->family >= CHIP_BONAIRE) {
454 value = rdev->config.cik.macrotile_mode_array;
455 value_size = sizeof(uint32_t)*16;
456 } else {
457 DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
458 return -EINVAL;
459 }
460 break;
452 case RADEON_INFO_SI_CP_DMA_COMPUTE: 461 case RADEON_INFO_SI_CP_DMA_COMPUTE:
453 *value = 1; 462 *value = 1;
454 break; 463 break;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 0c7b8c66301b..0b158f98d287 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -422,6 +422,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
422 /* Pin framebuffer & get tilling informations */ 422 /* Pin framebuffer & get tilling informations */
423 obj = radeon_fb->obj; 423 obj = radeon_fb->obj;
424 rbo = gem_to_radeon_bo(obj); 424 rbo = gem_to_radeon_bo(obj);
425retry:
425 r = radeon_bo_reserve(rbo, false); 426 r = radeon_bo_reserve(rbo, false);
426 if (unlikely(r != 0)) 427 if (unlikely(r != 0))
427 return r; 428 return r;
@@ -430,6 +431,33 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
430 &base); 431 &base);
431 if (unlikely(r != 0)) { 432 if (unlikely(r != 0)) {
432 radeon_bo_unreserve(rbo); 433 radeon_bo_unreserve(rbo);
434
435 /* On old GPU like RN50 with little vram pining can fails because
436 * current fb is taking all space needed. So instead of unpining
437 * the old buffer after pining the new one, first unpin old one
438 * and then retry pining new one.
439 *
440 * As only master can set mode only master can pin and it is
441 * unlikely the master client will race with itself especialy
442 * on those old gpu with single crtc.
443 *
444 * We don't shutdown the display controller because new buffer
445 * will end up in same spot.
446 */
447 if (!atomic && fb && fb != crtc->fb) {
448 struct radeon_bo *old_rbo;
449 unsigned long nsize, osize;
450
451 old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj);
452 osize = radeon_bo_size(old_rbo);
453 nsize = radeon_bo_size(rbo);
454 if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) {
455 radeon_bo_unpin(old_rbo);
456 radeon_bo_unreserve(old_rbo);
457 fb = NULL;
458 goto retry;
459 }
460 }
433 return -EINVAL; 461 return -EINVAL;
434 } 462 }
435 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); 463 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 866ace070b91..984097b907ef 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -537,8 +537,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
537 struct device_attribute *attr, 537 struct device_attribute *attr,
538 char *buf) 538 char *buf)
539{ 539{
540 struct drm_device *ddev = dev_get_drvdata(dev); 540 struct radeon_device *rdev = dev_get_drvdata(dev);
541 struct radeon_device *rdev = ddev->dev_private;
542 int temp; 541 int temp;
543 542
544 if (rdev->asic->pm.get_temperature) 543 if (rdev->asic->pm.get_temperature)
@@ -553,8 +552,7 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
553 struct device_attribute *attr, 552 struct device_attribute *attr,
554 char *buf) 553 char *buf)
555{ 554{
556 struct drm_device *ddev = dev_get_drvdata(dev); 555 struct radeon_device *rdev = dev_get_drvdata(dev);
557 struct radeon_device *rdev = ddev->dev_private;
558 int hyst = to_sensor_dev_attr(attr)->index; 556 int hyst = to_sensor_dev_attr(attr)->index;
559 int temp; 557 int temp;
560 558
@@ -566,23 +564,14 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
566 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 564 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
567} 565}
568 566
569static ssize_t radeon_hwmon_show_name(struct device *dev,
570 struct device_attribute *attr,
571 char *buf)
572{
573 return sprintf(buf, "radeon\n");
574}
575
576static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); 567static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
577static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0); 568static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
578static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1); 569static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
579static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
580 570
581static struct attribute *hwmon_attributes[] = { 571static struct attribute *hwmon_attributes[] = {
582 &sensor_dev_attr_temp1_input.dev_attr.attr, 572 &sensor_dev_attr_temp1_input.dev_attr.attr,
583 &sensor_dev_attr_temp1_crit.dev_attr.attr, 573 &sensor_dev_attr_temp1_crit.dev_attr.attr,
584 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, 574 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
585 &sensor_dev_attr_name.dev_attr.attr,
586 NULL 575 NULL
587}; 576};
588 577
@@ -590,8 +579,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
590 struct attribute *attr, int index) 579 struct attribute *attr, int index)
591{ 580{
592 struct device *dev = container_of(kobj, struct device, kobj); 581 struct device *dev = container_of(kobj, struct device, kobj);
593 struct drm_device *ddev = dev_get_drvdata(dev); 582 struct radeon_device *rdev = dev_get_drvdata(dev);
594 struct radeon_device *rdev = ddev->dev_private;
595 583
596 /* Skip limit attributes if DPM is not enabled */ 584 /* Skip limit attributes if DPM is not enabled */
597 if (rdev->pm.pm_method != PM_METHOD_DPM && 585 if (rdev->pm.pm_method != PM_METHOD_DPM &&
@@ -607,11 +595,15 @@ static const struct attribute_group hwmon_attrgroup = {
607 .is_visible = hwmon_attributes_visible, 595 .is_visible = hwmon_attributes_visible,
608}; 596};
609 597
598static const struct attribute_group *hwmon_groups[] = {
599 &hwmon_attrgroup,
600 NULL
601};
602
610static int radeon_hwmon_init(struct radeon_device *rdev) 603static int radeon_hwmon_init(struct radeon_device *rdev)
611{ 604{
612 int err = 0; 605 int err = 0;
613 606 struct device *hwmon_dev;
614 rdev->pm.int_hwmon_dev = NULL;
615 607
616 switch (rdev->pm.int_thermal_type) { 608 switch (rdev->pm.int_thermal_type) {
617 case THERMAL_TYPE_RV6XX: 609 case THERMAL_TYPE_RV6XX:
@@ -624,20 +616,13 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
624 case THERMAL_TYPE_KV: 616 case THERMAL_TYPE_KV:
625 if (rdev->asic->pm.get_temperature == NULL) 617 if (rdev->asic->pm.get_temperature == NULL)
626 return err; 618 return err;
627 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); 619 hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
628 if (IS_ERR(rdev->pm.int_hwmon_dev)) { 620 "radeon", rdev,
629 err = PTR_ERR(rdev->pm.int_hwmon_dev); 621 hwmon_groups);
622 if (IS_ERR(hwmon_dev)) {
623 err = PTR_ERR(hwmon_dev);
630 dev_err(rdev->dev, 624 dev_err(rdev->dev,
631 "Unable to register hwmon device: %d\n", err); 625 "Unable to register hwmon device: %d\n", err);
632 break;
633 }
634 dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
635 err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
636 &hwmon_attrgroup);
637 if (err) {
638 dev_err(rdev->dev,
639 "Unable to create hwmon sysfs file: %d\n", err);
640 hwmon_device_unregister(rdev->dev);
641 } 626 }
642 break; 627 break;
643 default: 628 default:
@@ -647,14 +632,6 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
647 return err; 632 return err;
648} 633}
649 634
650static void radeon_hwmon_fini(struct radeon_device *rdev)
651{
652 if (rdev->pm.int_hwmon_dev) {
653 sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
654 hwmon_device_unregister(rdev->pm.int_hwmon_dev);
655 }
656}
657
658static void radeon_dpm_thermal_work_handler(struct work_struct *work) 635static void radeon_dpm_thermal_work_handler(struct work_struct *work)
659{ 636{
660 struct radeon_device *rdev = 637 struct radeon_device *rdev =
@@ -1252,7 +1229,6 @@ int radeon_pm_init(struct radeon_device *rdev)
1252 case CHIP_RS780: 1229 case CHIP_RS780:
1253 case CHIP_RS880: 1230 case CHIP_RS880:
1254 case CHIP_CAYMAN: 1231 case CHIP_CAYMAN:
1255 case CHIP_ARUBA:
1256 case CHIP_BONAIRE: 1232 case CHIP_BONAIRE:
1257 case CHIP_KABINI: 1233 case CHIP_KABINI:
1258 case CHIP_KAVERI: 1234 case CHIP_KAVERI:
@@ -1284,6 +1260,7 @@ int radeon_pm_init(struct radeon_device *rdev)
1284 case CHIP_BARTS: 1260 case CHIP_BARTS:
1285 case CHIP_TURKS: 1261 case CHIP_TURKS:
1286 case CHIP_CAICOS: 1262 case CHIP_CAICOS:
1263 case CHIP_ARUBA:
1287 case CHIP_TAHITI: 1264 case CHIP_TAHITI:
1288 case CHIP_PITCAIRN: 1265 case CHIP_PITCAIRN:
1289 case CHIP_VERDE: 1266 case CHIP_VERDE:
@@ -1337,8 +1314,6 @@ static void radeon_pm_fini_old(struct radeon_device *rdev)
1337 1314
1338 if (rdev->pm.power_state) 1315 if (rdev->pm.power_state)
1339 kfree(rdev->pm.power_state); 1316 kfree(rdev->pm.power_state);
1340
1341 radeon_hwmon_fini(rdev);
1342} 1317}
1343 1318
1344static void radeon_pm_fini_dpm(struct radeon_device *rdev) 1319static void radeon_pm_fini_dpm(struct radeon_device *rdev)
@@ -1358,8 +1333,6 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev)
1358 1333
1359 if (rdev->pm.power_state) 1334 if (rdev->pm.power_state)
1360 kfree(rdev->pm.power_state); 1335 kfree(rdev->pm.power_state);
1361
1362 radeon_hwmon_fini(rdev);
1363} 1336}
1364 1337
1365void radeon_pm_fini(struct radeon_device *rdev) 1338void radeon_pm_fini(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 18254e1c3e71..9214403ae173 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -61,7 +61,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
61 struct radeon_ib *ib, struct radeon_vm *vm, 61 struct radeon_ib *ib, struct radeon_vm *vm,
62 unsigned size) 62 unsigned size)
63{ 63{
64 int i, r; 64 int r;
65 65
66 r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true); 66 r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
67 if (r) { 67 if (r) {
@@ -87,8 +87,6 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
87 ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo); 87 ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
88 } 88 }
89 ib->is_const_ib = false; 89 ib->is_const_ib = false;
90 for (i = 0; i < RADEON_NUM_RINGS; ++i)
91 ib->sync_to[i] = NULL;
92 90
93 return 0; 91 return 0;
94} 92}
@@ -109,25 +107,6 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
109} 107}
110 108
111/** 109/**
112 * radeon_ib_sync_to - sync to fence before executing the IB
113 *
114 * @ib: IB object to add fence to
115 * @fence: fence to sync to
116 *
117 * Sync to the fence before executing the IB
118 */
119void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
120{
121 struct radeon_fence *other;
122
123 if (!fence)
124 return;
125
126 other = ib->sync_to[fence->ring];
127 ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
128}
129
130/**
131 * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring 110 * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
132 * 111 *
133 * @rdev: radeon_device pointer 112 * @rdev: radeon_device pointer
@@ -151,8 +130,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
151 struct radeon_ib *const_ib) 130 struct radeon_ib *const_ib)
152{ 131{
153 struct radeon_ring *ring = &rdev->ring[ib->ring]; 132 struct radeon_ring *ring = &rdev->ring[ib->ring];
154 bool need_sync = false; 133 int r = 0;
155 int i, r = 0;
156 134
157 if (!ib->length_dw || !ring->ready) { 135 if (!ib->length_dw || !ring->ready) {
158 /* TODO: Nothings in the ib we should report. */ 136 /* TODO: Nothings in the ib we should report. */
@@ -166,19 +144,15 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
166 dev_err(rdev->dev, "scheduling IB failed (%d).\n", r); 144 dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
167 return r; 145 return r;
168 } 146 }
169 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 147
170 struct radeon_fence *fence = ib->sync_to[i]; 148 /* sync with other rings */
171 if (radeon_fence_need_sync(fence, ib->ring)) { 149 r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
172 need_sync = true; 150 if (r) {
173 radeon_semaphore_sync_rings(rdev, ib->semaphore, 151 dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
174 fence->ring, ib->ring); 152 radeon_ring_unlock_undo(rdev, ring);
175 radeon_fence_note_sync(fence, ib->ring); 153 return r;
176 }
177 }
178 /* immediately free semaphore when we don't need to sync */
179 if (!need_sync) {
180 radeon_semaphore_free(rdev, &ib->semaphore, NULL);
181 } 154 }
155
182 /* if we can't remember our last VM flush then flush now! */ 156 /* if we can't remember our last VM flush then flush now! */
183 /* XXX figure out why we have to flush for every IB */ 157 /* XXX figure out why we have to flush for every IB */
184 if (ib->vm /*&& !ib->vm->last_flush*/) { 158 if (ib->vm /*&& !ib->vm->last_flush*/) {
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 8dcc20f53d73..2b42aa1914f2 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -29,12 +29,12 @@
29 */ 29 */
30#include <drm/drmP.h> 30#include <drm/drmP.h>
31#include "radeon.h" 31#include "radeon.h"
32 32#include "radeon_trace.h"
33 33
34int radeon_semaphore_create(struct radeon_device *rdev, 34int radeon_semaphore_create(struct radeon_device *rdev,
35 struct radeon_semaphore **semaphore) 35 struct radeon_semaphore **semaphore)
36{ 36{
37 int r; 37 int i, r;
38 38
39 *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL); 39 *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
40 if (*semaphore == NULL) { 40 if (*semaphore == NULL) {
@@ -50,54 +50,121 @@ int radeon_semaphore_create(struct radeon_device *rdev,
50 (*semaphore)->waiters = 0; 50 (*semaphore)->waiters = 0;
51 (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo); 51 (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
52 *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0; 52 *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
53
54 for (i = 0; i < RADEON_NUM_RINGS; ++i)
55 (*semaphore)->sync_to[i] = NULL;
56
53 return 0; 57 return 0;
54} 58}
55 59
56void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, 60bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
57 struct radeon_semaphore *semaphore) 61 struct radeon_semaphore *semaphore)
58{ 62{
59 --semaphore->waiters; 63 struct radeon_ring *ring = &rdev->ring[ridx];
60 radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false); 64
65 trace_radeon_semaphore_signale(ridx, semaphore);
66
67 if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) {
68 --semaphore->waiters;
69
70 /* for debugging lockup only, used by sysfs debug files */
71 ring->last_semaphore_signal_addr = semaphore->gpu_addr;
72 return true;
73 }
74 return false;
61} 75}
62 76
63void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, 77bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
64 struct radeon_semaphore *semaphore) 78 struct radeon_semaphore *semaphore)
65{ 79{
66 ++semaphore->waiters; 80 struct radeon_ring *ring = &rdev->ring[ridx];
67 radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true); 81
82 trace_radeon_semaphore_wait(ridx, semaphore);
83
84 if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) {
85 ++semaphore->waiters;
86
87 /* for debugging lockup only, used by sysfs debug files */
88 ring->last_semaphore_wait_addr = semaphore->gpu_addr;
89 return true;
90 }
91 return false;
92}
93
94/**
95 * radeon_semaphore_sync_to - use the semaphore to sync to a fence
96 *
97 * @semaphore: semaphore object to add fence to
98 * @fence: fence to sync to
99 *
100 * Sync to the fence using this semaphore object
101 */
102void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
103 struct radeon_fence *fence)
104{
105 struct radeon_fence *other;
106
107 if (!fence)
108 return;
109
110 other = semaphore->sync_to[fence->ring];
111 semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other);
68} 112}
69 113
70/* caller must hold ring lock */ 114/**
115 * radeon_semaphore_sync_rings - sync ring to all registered fences
116 *
117 * @rdev: radeon_device pointer
118 * @semaphore: semaphore object to use for sync
119 * @ring: ring that needs sync
120 *
121 * Ensure that all registered fences are signaled before letting
122 * the ring continue. The caller must hold the ring lock.
123 */
71int radeon_semaphore_sync_rings(struct radeon_device *rdev, 124int radeon_semaphore_sync_rings(struct radeon_device *rdev,
72 struct radeon_semaphore *semaphore, 125 struct radeon_semaphore *semaphore,
73 int signaler, int waiter) 126 int ring)
74{ 127{
75 int r; 128 int i, r;
76 129
77 /* no need to signal and wait on the same ring */ 130 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
78 if (signaler == waiter) { 131 struct radeon_fence *fence = semaphore->sync_to[i];
79 return 0;
80 }
81 132
82 /* prevent GPU deadlocks */ 133 /* check if we really need to sync */
83 if (!rdev->ring[signaler].ready) { 134 if (!radeon_fence_need_sync(fence, ring))
84 dev_err(rdev->dev, "Trying to sync to a disabled ring!"); 135 continue;
85 return -EINVAL;
86 }
87 136
88 r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8); 137 /* prevent GPU deadlocks */
89 if (r) { 138 if (!rdev->ring[i].ready) {
90 return r; 139 dev_err(rdev->dev, "Syncing to a disabled ring!");
91 } 140 return -EINVAL;
92 radeon_semaphore_emit_signal(rdev, signaler, semaphore); 141 }
93 radeon_ring_commit(rdev, &rdev->ring[signaler]);
94 142
95 /* we assume caller has already allocated space on waiters ring */ 143 /* allocate enough space for sync command */
96 radeon_semaphore_emit_wait(rdev, waiter, semaphore); 144 r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
145 if (r) {
146 return r;
147 }
97 148
98 /* for debugging lockup only, used by sysfs debug files */ 149 /* emit the signal semaphore */
99 rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr; 150 if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
100 rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr; 151 /* signaling wasn't successful wait manually */
152 radeon_ring_undo(&rdev->ring[i]);
153 radeon_fence_wait_locked(fence);
154 continue;
155 }
156
157 /* we assume caller has already allocated space on waiters ring */
158 if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
159 /* waiting wasn't successful wait manually */
160 radeon_ring_undo(&rdev->ring[i]);
161 radeon_fence_wait_locked(fence);
162 continue;
163 }
164
165 radeon_ring_commit(rdev, &rdev->ring[i]);
166 radeon_fence_note_sync(fence, ring);
167 }
101 168
102 return 0; 169 return 0;
103} 170}
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 811bca691b36..0473257d4078 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -47,6 +47,39 @@ TRACE_EVENT(radeon_cs,
47 __entry->fences) 47 __entry->fences)
48); 48);
49 49
50TRACE_EVENT(radeon_vm_grab_id,
51 TP_PROTO(unsigned vmid, int ring),
52 TP_ARGS(vmid, ring),
53 TP_STRUCT__entry(
54 __field(u32, vmid)
55 __field(u32, ring)
56 ),
57
58 TP_fast_assign(
59 __entry->vmid = vmid;
60 __entry->ring = ring;
61 ),
62 TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
63);
64
65TRACE_EVENT(radeon_vm_bo_update,
66 TP_PROTO(struct radeon_bo_va *bo_va),
67 TP_ARGS(bo_va),
68 TP_STRUCT__entry(
69 __field(u64, soffset)
70 __field(u64, eoffset)
71 __field(u32, flags)
72 ),
73
74 TP_fast_assign(
75 __entry->soffset = bo_va->soffset;
76 __entry->eoffset = bo_va->eoffset;
77 __entry->flags = bo_va->flags;
78 ),
79 TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
80 __entry->soffset, __entry->eoffset, __entry->flags)
81);
82
50TRACE_EVENT(radeon_vm_set_page, 83TRACE_EVENT(radeon_vm_set_page,
51 TP_PROTO(uint64_t pe, uint64_t addr, unsigned count, 84 TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
52 uint32_t incr, uint32_t flags), 85 uint32_t incr, uint32_t flags),
@@ -111,6 +144,42 @@ DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
111 TP_ARGS(dev, seqno) 144 TP_ARGS(dev, seqno)
112); 145);
113 146
147DECLARE_EVENT_CLASS(radeon_semaphore_request,
148
149 TP_PROTO(int ring, struct radeon_semaphore *sem),
150
151 TP_ARGS(ring, sem),
152
153 TP_STRUCT__entry(
154 __field(int, ring)
155 __field(signed, waiters)
156 __field(uint64_t, gpu_addr)
157 ),
158
159 TP_fast_assign(
160 __entry->ring = ring;
161 __entry->waiters = sem->waiters;
162 __entry->gpu_addr = sem->gpu_addr;
163 ),
164
165 TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring,
166 __entry->waiters, __entry->gpu_addr)
167);
168
169DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_signale,
170
171 TP_PROTO(int ring, struct radeon_semaphore *sem),
172
173 TP_ARGS(ring, sem)
174);
175
176DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_wait,
177
178 TP_PROTO(int ring, struct radeon_semaphore *sem),
179
180 TP_ARGS(ring, sem)
181);
182
114#endif 183#endif
115 184
116/* This part must be outside protection */ 185/* This part must be outside protection */
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index a072fa8c46b0..d46b58d078aa 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -21,7 +21,7 @@ cayman 0x9400
210x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE 210x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
220x000089B0 VGT_HS_OFFCHIP_PARAM 220x000089B0 VGT_HS_OFFCHIP_PARAM
230x00008A14 PA_CL_ENHANCE 230x00008A14 PA_CL_ENHANCE
240x00008A60 PA_SC_LINE_STIPPLE_VALUE 240x00008A60 PA_SU_LINE_STIPPLE_VALUE
250x00008B10 PA_SC_LINE_STIPPLE_STATE 250x00008B10 PA_SC_LINE_STIPPLE_STATE
260x00008BF0 PA_SC_ENHANCE 260x00008BF0 PA_SC_ENHANCE
270x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 270x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
@@ -532,7 +532,7 @@ cayman 0x9400
5320x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET 5320x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
5330x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE 5330x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
5340x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET 5340x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
5350x00028B74 VGT_GS_INSTANCE_CNT 5350x00028B90 VGT_GS_INSTANCE_CNT
5360x00028BD4 PA_SC_CENTROID_PRIORITY_0 5360x00028BD4 PA_SC_CENTROID_PRIORITY_0
5370x00028BD8 PA_SC_CENTROID_PRIORITY_1 5370x00028BD8 PA_SC_CENTROID_PRIORITY_1
5380x00028BDC PA_SC_LINE_CNTL 5380x00028BDC PA_SC_LINE_CNTL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index b912a37689bf..57745c8761c8 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -22,7 +22,7 @@ evergreen 0x9400
220x000089A4 VGT_COMPUTE_START_Z 220x000089A4 VGT_COMPUTE_START_Z
230x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE 230x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
240x00008A14 PA_CL_ENHANCE 240x00008A14 PA_CL_ENHANCE
250x00008A60 PA_SC_LINE_STIPPLE_VALUE 250x00008A60 PA_SU_LINE_STIPPLE_VALUE
260x00008B10 PA_SC_LINE_STIPPLE_STATE 260x00008B10 PA_SC_LINE_STIPPLE_STATE
270x00008BF0 PA_SC_ENHANCE 270x00008BF0 PA_SC_ENHANCE
280x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 280x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
@@ -545,7 +545,7 @@ evergreen 0x9400
5450x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET 5450x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
5460x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE 5460x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
5470x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET 5470x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
5480x00028B74 VGT_GS_INSTANCE_CNT 5480x00028B90 VGT_GS_INSTANCE_CNT
5490x00028C00 PA_SC_LINE_CNTL 5490x00028C00 PA_SC_LINE_CNTL
5500x00028C08 PA_SU_VTX_CNTL 5500x00028C08 PA_SU_VTX_CNTL
5510x00028C0C PA_CL_GB_VERT_CLIP_ADJ 5510x00028C0C PA_CL_GB_VERT_CLIP_ADJ
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index f9b02e3d6830..aca8cbe8a335 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -66,13 +66,8 @@ int rv770_copy_dma(struct radeon_device *rdev,
66 return r; 66 return r;
67 } 67 }
68 68
69 if (radeon_fence_need_sync(*fence, ring->idx)) { 69 radeon_semaphore_sync_to(sem, *fence);
70 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, 70 radeon_semaphore_sync_rings(rdev, sem, ring->idx);
71 ring->idx);
72 radeon_fence_note_sync(*fence, ring->idx);
73 } else {
74 radeon_semaphore_free(rdev, &sem, NULL);
75 }
76 71
77 for (i = 0; i < num_loops; i++) { 72 for (i = 0; i < num_loops; i++) {
78 cur_size_in_dw = size_in_dw; 73 cur_size_in_dw = size_in_dw;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 6a64ccaa0695..a36736dab5e0 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3882,8 +3882,15 @@ static int si_mc_init(struct radeon_device *rdev)
3882 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 3882 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
3883 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 3883 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
3884 /* size in MB on si */ 3884 /* size in MB on si */
3885 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; 3885 tmp = RREG32(CONFIG_MEMSIZE);
3886 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; 3886 /* some boards may have garbage in the upper 16 bits */
3887 if (tmp & 0xffff0000) {
3888 DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
3889 if (tmp & 0xffff)
3890 tmp &= 0xffff;
3891 }
3892 rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
3893 rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
3887 rdev->mc.visible_vram_size = rdev->mc.aper_size; 3894 rdev->mc.visible_vram_size = rdev->mc.aper_size;
3888 si_vram_gtt_location(rdev, &rdev->mc); 3895 si_vram_gtt_location(rdev, &rdev->mc);
3889 radeon_update_bandwidth_info(rdev); 3896 radeon_update_bandwidth_info(rdev);
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 8e8f46133532..59be2cfcbb47 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -195,13 +195,8 @@ int si_copy_dma(struct radeon_device *rdev,
195 return r; 195 return r;
196 } 196 }
197 197
198 if (radeon_fence_need_sync(*fence, ring->idx)) { 198 radeon_semaphore_sync_to(sem, *fence);
199 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, 199 radeon_semaphore_sync_rings(rdev, sem, ring->idx);
200 ring->idx);
201 radeon_fence_note_sync(*fence, ring->idx);
202 } else {
203 radeon_semaphore_free(rdev, &sem, NULL);
204 }
205 200
206 for (i = 0; i < num_loops; i++) { 201 for (i = 0; i < num_loops; i++) {
207 cur_size_in_bytes = size_in_bytes; 202 cur_size_in_bytes = size_in_bytes;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 9364129ba292..d700698a1f22 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1873,9 +1873,9 @@ int trinity_dpm_init(struct radeon_device *rdev)
1873 pi->enable_sclk_ds = true; 1873 pi->enable_sclk_ds = true;
1874 pi->enable_gfx_power_gating = true; 1874 pi->enable_gfx_power_gating = true;
1875 pi->enable_gfx_clock_gating = true; 1875 pi->enable_gfx_clock_gating = true;
1876 pi->enable_mg_clock_gating = true; 1876 pi->enable_mg_clock_gating = false;
1877 pi->enable_gfx_dynamic_mgpg = true; /* ??? */ 1877 pi->enable_gfx_dynamic_mgpg = false;
1878 pi->override_dynamic_mgpg = true; 1878 pi->override_dynamic_mgpg = false;
1879 pi->enable_auto_thermal_throttling = true; 1879 pi->enable_auto_thermal_throttling = true;
1880 pi->voltage_drop_in_dce = false; /* need to restructure dpm/modeset interaction */ 1880 pi->voltage_drop_in_dce = false; /* need to restructure dpm/modeset interaction */
1881 pi->uvd_dpm = true; /* ??? */ 1881 pi->uvd_dpm = true; /* ??? */
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 7266805d9786..d4a68af1a279 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -357,7 +357,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
357 * 357 *
358 * Emit a semaphore command (either wait or signal) to the UVD ring. 358 * Emit a semaphore command (either wait or signal) to the UVD ring.
359 */ 359 */
360void uvd_v1_0_semaphore_emit(struct radeon_device *rdev, 360bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
361 struct radeon_ring *ring, 361 struct radeon_ring *ring,
362 struct radeon_semaphore *semaphore, 362 struct radeon_semaphore *semaphore,
363 bool emit_wait) 363 bool emit_wait)
@@ -372,6 +372,8 @@ void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
372 372
373 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); 373 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
374 radeon_ring_write(ring, emit_wait ? 1 : 0); 374 radeon_ring_write(ring, emit_wait ? 1 : 0);
375
376 return true;
375} 377}
376 378
377/** 379/**
diff --git a/drivers/gpu/drm/radeon/uvd_v3_1.c b/drivers/gpu/drm/radeon/uvd_v3_1.c
index 5b6fa1f62d4e..d722db2cf340 100644
--- a/drivers/gpu/drm/radeon/uvd_v3_1.c
+++ b/drivers/gpu/drm/radeon/uvd_v3_1.c
@@ -37,7 +37,7 @@
37 * 37 *
38 * Emit a semaphore command (either wait or signal) to the UVD ring. 38 * Emit a semaphore command (either wait or signal) to the UVD ring.
39 */ 39 */
40void uvd_v3_1_semaphore_emit(struct radeon_device *rdev, 40bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
41 struct radeon_ring *ring, 41 struct radeon_ring *ring,
42 struct radeon_semaphore *semaphore, 42 struct radeon_semaphore *semaphore,
43 bool emit_wait) 43 bool emit_wait)
@@ -52,4 +52,6 @@ void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
52 52
53 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); 53 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
54 radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0)); 54 radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
55
56 return true;
55} 57}
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 28e178137718..07eba596d458 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -135,11 +135,11 @@ int tegra_drm_submit(struct tegra_drm_context *context,
135 unsigned int num_relocs = args->num_relocs; 135 unsigned int num_relocs = args->num_relocs;
136 unsigned int num_waitchks = args->num_waitchks; 136 unsigned int num_waitchks = args->num_waitchks;
137 struct drm_tegra_cmdbuf __user *cmdbufs = 137 struct drm_tegra_cmdbuf __user *cmdbufs =
138 (void * __user)(uintptr_t)args->cmdbufs; 138 (void __user *)(uintptr_t)args->cmdbufs;
139 struct drm_tegra_reloc __user *relocs = 139 struct drm_tegra_reloc __user *relocs =
140 (void * __user)(uintptr_t)args->relocs; 140 (void __user *)(uintptr_t)args->relocs;
141 struct drm_tegra_waitchk __user *waitchks = 141 struct drm_tegra_waitchk __user *waitchks =
142 (void * __user)(uintptr_t)args->waitchks; 142 (void __user *)(uintptr_t)args->waitchks;
143 struct drm_tegra_syncpt syncpt; 143 struct drm_tegra_syncpt syncpt;
144 struct host1x_job *job; 144 struct host1x_job *job;
145 int err; 145 int err;
@@ -163,9 +163,10 @@ int tegra_drm_submit(struct tegra_drm_context *context,
163 struct drm_tegra_cmdbuf cmdbuf; 163 struct drm_tegra_cmdbuf cmdbuf;
164 struct host1x_bo *bo; 164 struct host1x_bo *bo;
165 165
166 err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf)); 166 if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
167 if (err) 167 err = -EFAULT;
168 goto fail; 168 goto fail;
169 }
169 170
170 bo = host1x_bo_lookup(drm, file, cmdbuf.handle); 171 bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
171 if (!bo) { 172 if (!bo) {
@@ -178,10 +179,11 @@ int tegra_drm_submit(struct tegra_drm_context *context,
178 cmdbufs++; 179 cmdbufs++;
179 } 180 }
180 181
181 err = copy_from_user(job->relocarray, relocs, 182 if (copy_from_user(job->relocarray, relocs,
182 sizeof(*relocs) * num_relocs); 183 sizeof(*relocs) * num_relocs)) {
183 if (err) 184 err = -EFAULT;
184 goto fail; 185 goto fail;
186 }
185 187
186 while (num_relocs--) { 188 while (num_relocs--) {
187 struct host1x_reloc *reloc = &job->relocarray[num_relocs]; 189 struct host1x_reloc *reloc = &job->relocarray[num_relocs];
@@ -199,15 +201,17 @@ int tegra_drm_submit(struct tegra_drm_context *context,
199 } 201 }
200 } 202 }
201 203
202 err = copy_from_user(job->waitchk, waitchks, 204 if (copy_from_user(job->waitchk, waitchks,
203 sizeof(*waitchks) * num_waitchks); 205 sizeof(*waitchks) * num_waitchks)) {
204 if (err) 206 err = -EFAULT;
205 goto fail; 207 goto fail;
208 }
206 209
207 err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts, 210 if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
208 sizeof(syncpt)); 211 sizeof(syncpt))) {
209 if (err) 212 err = -EFAULT;
210 goto fail; 213 goto fail;
214 }
211 215
212 job->is_addr_reg = context->client->ops->is_addr_reg; 216 job->is_addr_reg = context->client->ops->is_addr_reg;
213 job->syncpt_incrs = syncpt.incrs; 217 job->syncpt_incrs = syncpt.incrs;
@@ -573,7 +577,7 @@ static void tegra_debugfs_cleanup(struct drm_minor *minor)
573} 577}
574#endif 578#endif
575 579
576struct drm_driver tegra_drm_driver = { 580static struct drm_driver tegra_drm_driver = {
577 .driver_features = DRIVER_MODESET | DRIVER_GEM, 581 .driver_features = DRIVER_MODESET | DRIVER_GEM,
578 .load = tegra_drm_load, 582 .load = tegra_drm_load,
579 .unload = tegra_drm_unload, 583 .unload = tegra_drm_unload,
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index fdfe259ed7f8..7da0b923131f 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -116,7 +116,7 @@ host1x_client_to_dc(struct host1x_client *client)
116 116
117static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc) 117static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
118{ 118{
119 return container_of(crtc, struct tegra_dc, base); 119 return crtc ? container_of(crtc, struct tegra_dc, base) : NULL;
120} 120}
121 121
122static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value, 122static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 490f7719e317..a3835e7de184 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -247,7 +247,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
247 info->var.yoffset * fb->pitches[0]; 247 info->var.yoffset * fb->pitches[0];
248 248
249 drm->mode_config.fb_base = (resource_size_t)bo->paddr; 249 drm->mode_config.fb_base = (resource_size_t)bo->paddr;
250 info->screen_base = bo->vaddr + offset; 250 info->screen_base = (void __iomem *)bo->vaddr + offset;
251 info->screen_size = size; 251 info->screen_size = size;
252 info->fix.smem_start = (unsigned long)(bo->paddr + offset); 252 info->fix.smem_start = (unsigned long)(bo->paddr + offset);
253 info->fix.smem_len = size; 253 info->fix.smem_len = size;
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index ba47ca4fb880..3b29018913a5 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -14,6 +14,8 @@
14 14
15struct tegra_rgb { 15struct tegra_rgb {
16 struct tegra_output output; 16 struct tegra_output output;
17 struct tegra_dc *dc;
18
17 struct clk *clk_parent; 19 struct clk *clk_parent;
18 struct clk *clk; 20 struct clk *clk;
19}; 21};
@@ -84,18 +86,18 @@ static void tegra_dc_write_regs(struct tegra_dc *dc,
84 86
85static int tegra_output_rgb_enable(struct tegra_output *output) 87static int tegra_output_rgb_enable(struct tegra_output *output)
86{ 88{
87 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); 89 struct tegra_rgb *rgb = to_rgb(output);
88 90
89 tegra_dc_write_regs(dc, rgb_enable, ARRAY_SIZE(rgb_enable)); 91 tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
90 92
91 return 0; 93 return 0;
92} 94}
93 95
94static int tegra_output_rgb_disable(struct tegra_output *output) 96static int tegra_output_rgb_disable(struct tegra_output *output)
95{ 97{
96 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); 98 struct tegra_rgb *rgb = to_rgb(output);
97 99
98 tegra_dc_write_regs(dc, rgb_disable, ARRAY_SIZE(rgb_disable)); 100 tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
99 101
100 return 0; 102 return 0;
101} 103}
@@ -146,6 +148,7 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
146 148
147 rgb->output.dev = dc->dev; 149 rgb->output.dev = dc->dev;
148 rgb->output.of_node = np; 150 rgb->output.of_node = np;
151 rgb->dc = dc;
149 152
150 err = tegra_output_probe(&rgb->output); 153 err = tegra_output_probe(&rgb->output);
151 if (err < 0) 154 if (err < 0)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 8d5a646ebe6a..07e02c4bf5a8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -151,7 +151,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
151 atomic_dec(&bo->glob->bo_count); 151 atomic_dec(&bo->glob->bo_count);
152 if (bo->resv == &bo->ttm_resv) 152 if (bo->resv == &bo->ttm_resv)
153 reservation_object_fini(&bo->ttm_resv); 153 reservation_object_fini(&bo->ttm_resv);
154 154 mutex_destroy(&bo->wu_mutex);
155 if (bo->destroy) 155 if (bo->destroy)
156 bo->destroy(bo); 156 bo->destroy(bo);
157 else { 157 else {
@@ -1123,6 +1123,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1123 INIT_LIST_HEAD(&bo->ddestroy); 1123 INIT_LIST_HEAD(&bo->ddestroy);
1124 INIT_LIST_HEAD(&bo->swap); 1124 INIT_LIST_HEAD(&bo->swap);
1125 INIT_LIST_HEAD(&bo->io_reserve_lru); 1125 INIT_LIST_HEAD(&bo->io_reserve_lru);
1126 mutex_init(&bo->wu_mutex);
1126 bo->bdev = bdev; 1127 bo->bdev = bdev;
1127 bo->glob = bdev->glob; 1128 bo->glob = bdev->glob;
1128 bo->type = type; 1129 bo->type = type;
@@ -1704,3 +1705,35 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1704 ; 1705 ;
1705} 1706}
1706EXPORT_SYMBOL(ttm_bo_swapout_all); 1707EXPORT_SYMBOL(ttm_bo_swapout_all);
1708
1709/**
1710 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
1711 * unreserved
1712 *
1713 * @bo: Pointer to buffer
1714 */
1715int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
1716{
1717 int ret;
1718
1719 /*
1720 * In the absense of a wait_unlocked API,
1721 * Use the bo::wu_mutex to avoid triggering livelocks due to
1722 * concurrent use of this function. Note that this use of
1723 * bo::wu_mutex can go away if we change locking order to
1724 * mmap_sem -> bo::reserve.
1725 */
1726 ret = mutex_lock_interruptible(&bo->wu_mutex);
1727 if (unlikely(ret != 0))
1728 return -ERESTARTSYS;
1729 if (!ww_mutex_is_locked(&bo->resv->lock))
1730 goto out_unlock;
1731 ret = ttm_bo_reserve_nolru(bo, true, false, false, NULL);
1732 if (unlikely(ret != 0))
1733 goto out_unlock;
1734 ww_mutex_unlock(&bo->resv->lock);
1735
1736out_unlock:
1737 mutex_unlock(&bo->wu_mutex);
1738 return ret;
1739}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 4834c463c38b..15b86a94949d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -350,10 +350,13 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
350 goto out2; 350 goto out2;
351 351
352 /* 352 /*
353 * Move nonexistent data. NOP. 353 * Don't move nonexistent data. Clear destination instead.
354 */ 354 */
355 if (old_iomap == NULL && ttm == NULL) 355 if (old_iomap == NULL &&
356 (ttm == NULL || ttm->state == tt_unpopulated)) {
357 memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
356 goto out2; 358 goto out2;
359 }
357 360
358 /* 361 /*
359 * TTM might be null for moves within the same region. 362 * TTM might be null for moves within the same region.
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index ac617f3ecd0c..b249ab9b1eb2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -107,13 +107,28 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
107 /* 107 /*
108 * Work around locking order reversal in fault / nopfn 108 * Work around locking order reversal in fault / nopfn
109 * between mmap_sem and bo_reserve: Perform a trylock operation 109 * between mmap_sem and bo_reserve: Perform a trylock operation
110 * for reserve, and if it fails, retry the fault after scheduling. 110 * for reserve, and if it fails, retry the fault after waiting
111 * for the buffer to become unreserved.
111 */ 112 */
112 113 ret = ttm_bo_reserve(bo, true, true, false, NULL);
113 ret = ttm_bo_reserve(bo, true, true, false, 0);
114 if (unlikely(ret != 0)) { 114 if (unlikely(ret != 0)) {
115 if (ret == -EBUSY) 115 if (ret != -EBUSY)
116 set_need_resched(); 116 return VM_FAULT_NOPAGE;
117
118 if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
119 if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
120 up_read(&vma->vm_mm->mmap_sem);
121 (void) ttm_bo_wait_unreserved(bo);
122 }
123
124 return VM_FAULT_RETRY;
125 }
126
127 /*
128 * If we'd want to change locking order to
129 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
130 * instead of retrying the fault...
131 */
117 return VM_FAULT_NOPAGE; 132 return VM_FAULT_NOPAGE;
118 } 133 }
119 134
@@ -123,7 +138,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
123 case 0: 138 case 0:
124 break; 139 break;
125 case -EBUSY: 140 case -EBUSY:
126 set_need_resched();
127 case -ERESTARTSYS: 141 case -ERESTARTSYS:
128 retval = VM_FAULT_NOPAGE; 142 retval = VM_FAULT_NOPAGE;
129 goto out_unlock; 143 goto out_unlock;
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 6c911789ae5c..479e9418e3d7 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,8 +32,7 @@
32#include <linux/sched.h> 32#include <linux/sched.h>
33#include <linux/module.h> 33#include <linux/module.h>
34 34
35static void ttm_eu_backoff_reservation_locked(struct list_head *list, 35static void ttm_eu_backoff_reservation_locked(struct list_head *list)
36 struct ww_acquire_ctx *ticket)
37{ 36{
38 struct ttm_validate_buffer *entry; 37 struct ttm_validate_buffer *entry;
39 38
@@ -93,8 +92,9 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
93 entry = list_first_entry(list, struct ttm_validate_buffer, head); 92 entry = list_first_entry(list, struct ttm_validate_buffer, head);
94 glob = entry->bo->glob; 93 glob = entry->bo->glob;
95 spin_lock(&glob->lru_lock); 94 spin_lock(&glob->lru_lock);
96 ttm_eu_backoff_reservation_locked(list, ticket); 95 ttm_eu_backoff_reservation_locked(list);
97 ww_acquire_fini(ticket); 96 if (ticket)
97 ww_acquire_fini(ticket);
98 spin_unlock(&glob->lru_lock); 98 spin_unlock(&glob->lru_lock);
99} 99}
100EXPORT_SYMBOL(ttm_eu_backoff_reservation); 100EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -130,7 +130,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
130 entry = list_first_entry(list, struct ttm_validate_buffer, head); 130 entry = list_first_entry(list, struct ttm_validate_buffer, head);
131 glob = entry->bo->glob; 131 glob = entry->bo->glob;
132 132
133 ww_acquire_init(ticket, &reservation_ww_class); 133 if (ticket)
134 ww_acquire_init(ticket, &reservation_ww_class);
134retry: 135retry:
135 list_for_each_entry(entry, list, head) { 136 list_for_each_entry(entry, list, head) {
136 struct ttm_buffer_object *bo = entry->bo; 137 struct ttm_buffer_object *bo = entry->bo;
@@ -139,16 +140,17 @@ retry:
139 if (entry->reserved) 140 if (entry->reserved)
140 continue; 141 continue;
141 142
142 143 ret = ttm_bo_reserve_nolru(bo, true, (ticket == NULL), true,
143 ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket); 144 ticket);
144 145
145 if (ret == -EDEADLK) { 146 if (ret == -EDEADLK) {
146 /* uh oh, we lost out, drop every reservation and try 147 /* uh oh, we lost out, drop every reservation and try
147 * to only reserve this buffer, then start over if 148 * to only reserve this buffer, then start over if
148 * this succeeds. 149 * this succeeds.
149 */ 150 */
151 BUG_ON(ticket == NULL);
150 spin_lock(&glob->lru_lock); 152 spin_lock(&glob->lru_lock);
151 ttm_eu_backoff_reservation_locked(list, ticket); 153 ttm_eu_backoff_reservation_locked(list);
152 spin_unlock(&glob->lru_lock); 154 spin_unlock(&glob->lru_lock);
153 ttm_eu_list_ref_sub(list); 155 ttm_eu_list_ref_sub(list);
154 ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, 156 ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
@@ -175,7 +177,8 @@ retry:
175 } 177 }
176 } 178 }
177 179
178 ww_acquire_done(ticket); 180 if (ticket)
181 ww_acquire_done(ticket);
179 spin_lock(&glob->lru_lock); 182 spin_lock(&glob->lru_lock);
180 ttm_eu_del_from_lru_locked(list); 183 ttm_eu_del_from_lru_locked(list);
181 spin_unlock(&glob->lru_lock); 184 spin_unlock(&glob->lru_lock);
@@ -184,12 +187,14 @@ retry:
184 187
185err: 188err:
186 spin_lock(&glob->lru_lock); 189 spin_lock(&glob->lru_lock);
187 ttm_eu_backoff_reservation_locked(list, ticket); 190 ttm_eu_backoff_reservation_locked(list);
188 spin_unlock(&glob->lru_lock); 191 spin_unlock(&glob->lru_lock);
189 ttm_eu_list_ref_sub(list); 192 ttm_eu_list_ref_sub(list);
190err_fini: 193err_fini:
191 ww_acquire_done(ticket); 194 if (ticket) {
192 ww_acquire_fini(ticket); 195 ww_acquire_done(ticket);
196 ww_acquire_fini(ticket);
197 }
193 return ret; 198 return ret;
194} 199}
195EXPORT_SYMBOL(ttm_eu_reserve_buffers); 200EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -224,7 +229,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
224 } 229 }
225 spin_unlock(&bdev->fence_lock); 230 spin_unlock(&bdev->fence_lock);
226 spin_unlock(&glob->lru_lock); 231 spin_unlock(&glob->lru_lock);
227 ww_acquire_fini(ticket); 232 if (ticket)
233 ww_acquire_fini(ticket);
228 234
229 list_for_each_entry(entry, list, head) { 235 list_for_each_entry(entry, list, head) {
230 if (entry->old_sync_obj) 236 if (entry->old_sync_obj)
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index a868176c258a..6fe7b92a82d1 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,6 +26,12 @@
26 **************************************************************************/ 26 **************************************************************************/
27/* 27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 *
30 * While no substantial code is shared, the prime code is inspired by
31 * drm_prime.c, with
32 * Authors:
33 * Dave Airlie <airlied@redhat.com>
34 * Rob Clark <rob.clark@linaro.org>
29 */ 35 */
30/** @file ttm_ref_object.c 36/** @file ttm_ref_object.c
31 * 37 *
@@ -34,6 +40,7 @@
34 * and release on file close. 40 * and release on file close.
35 */ 41 */
36 42
43
37/** 44/**
38 * struct ttm_object_file 45 * struct ttm_object_file
39 * 46 *
@@ -84,6 +91,9 @@ struct ttm_object_device {
84 struct drm_open_hash object_hash; 91 struct drm_open_hash object_hash;
85 atomic_t object_count; 92 atomic_t object_count;
86 struct ttm_mem_global *mem_glob; 93 struct ttm_mem_global *mem_glob;
94 struct dma_buf_ops ops;
95 void (*dmabuf_release)(struct dma_buf *dma_buf);
96 size_t dma_buf_size;
87}; 97};
88 98
89/** 99/**
@@ -116,6 +126,8 @@ struct ttm_ref_object {
116 struct ttm_object_file *tfile; 126 struct ttm_object_file *tfile;
117}; 127};
118 128
129static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);
130
119static inline struct ttm_object_file * 131static inline struct ttm_object_file *
120ttm_object_file_ref(struct ttm_object_file *tfile) 132ttm_object_file_ref(struct ttm_object_file *tfile)
121{ 133{
@@ -416,9 +428,10 @@ out_err:
416} 428}
417EXPORT_SYMBOL(ttm_object_file_init); 429EXPORT_SYMBOL(ttm_object_file_init);
418 430
419struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global 431struct ttm_object_device *
420 *mem_glob, 432ttm_object_device_init(struct ttm_mem_global *mem_glob,
421 unsigned int hash_order) 433 unsigned int hash_order,
434 const struct dma_buf_ops *ops)
422{ 435{
423 struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL); 436 struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
424 int ret; 437 int ret;
@@ -430,10 +443,17 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
430 spin_lock_init(&tdev->object_lock); 443 spin_lock_init(&tdev->object_lock);
431 atomic_set(&tdev->object_count, 0); 444 atomic_set(&tdev->object_count, 0);
432 ret = drm_ht_create(&tdev->object_hash, hash_order); 445 ret = drm_ht_create(&tdev->object_hash, hash_order);
446 if (ret != 0)
447 goto out_no_object_hash;
433 448
434 if (likely(ret == 0)) 449 tdev->ops = *ops;
435 return tdev; 450 tdev->dmabuf_release = tdev->ops.release;
451 tdev->ops.release = ttm_prime_dmabuf_release;
452 tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
453 ttm_round_pot(sizeof(struct file));
454 return tdev;
436 455
456out_no_object_hash:
437 kfree(tdev); 457 kfree(tdev);
438 return NULL; 458 return NULL;
439} 459}
@@ -452,3 +472,225 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
452 kfree(tdev); 472 kfree(tdev);
453} 473}
454EXPORT_SYMBOL(ttm_object_device_release); 474EXPORT_SYMBOL(ttm_object_device_release);
475
476/**
477 * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
478 *
479 * @dma_buf: Non-refcounted pointer to a struct dma-buf.
480 *
481 * Obtain a file reference from a lookup structure that doesn't refcount
482 * the file, but synchronizes with its release method to make sure it has
483 * not been freed yet. See for example kref_get_unless_zero documentation.
484 * Returns true if refcounting succeeds, false otherwise.
485 *
486 * Nobody really wants this as a public API yet, so let it mature here
487 * for some time...
488 */
489static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
490{
491 return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
492}
493
494/**
495 * ttm_prime_refcount_release - refcount release method for a prime object.
496 *
497 * @p_base: Pointer to ttm_base_object pointer.
498 *
499 * This is a wrapper that calls the refcount_release founction of the
500 * underlying object. At the same time it cleans up the prime object.
501 * This function is called when all references to the base object we
502 * derive from are gone.
503 */
504static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
505{
506 struct ttm_base_object *base = *p_base;
507 struct ttm_prime_object *prime;
508
509 *p_base = NULL;
510 prime = container_of(base, struct ttm_prime_object, base);
511 BUG_ON(prime->dma_buf != NULL);
512 mutex_destroy(&prime->mutex);
513 if (prime->refcount_release)
514 prime->refcount_release(&base);
515}
516
517/**
518 * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
519 *
520 * @dma_buf:
521 *
522 * This function first calls the dma_buf release method the driver
523 * provides. Then it cleans up our dma_buf pointer used for lookup,
524 * and finally releases the reference the dma_buf has on our base
525 * object.
526 */
527static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
528{
529 struct ttm_prime_object *prime =
530 (struct ttm_prime_object *) dma_buf->priv;
531 struct ttm_base_object *base = &prime->base;
532 struct ttm_object_device *tdev = base->tfile->tdev;
533
534 if (tdev->dmabuf_release)
535 tdev->dmabuf_release(dma_buf);
536 mutex_lock(&prime->mutex);
537 if (prime->dma_buf == dma_buf)
538 prime->dma_buf = NULL;
539 mutex_unlock(&prime->mutex);
540 ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
541 ttm_base_object_unref(&base);
542}
543
544/**
545 * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
546 *
547 * @tfile: A struct ttm_object_file identifying the caller.
548 * @fd: The prime / dmabuf fd.
549 * @handle: The returned handle.
550 *
551 * This function returns a handle to an object that previously exported
552 * a dma-buf. Note that we don't handle imports yet, because we simply
553 * have no consumers of that implementation.
554 */
555int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
556 int fd, u32 *handle)
557{
558 struct ttm_object_device *tdev = tfile->tdev;
559 struct dma_buf *dma_buf;
560 struct ttm_prime_object *prime;
561 struct ttm_base_object *base;
562 int ret;
563
564 dma_buf = dma_buf_get(fd);
565 if (IS_ERR(dma_buf))
566 return PTR_ERR(dma_buf);
567
568 if (dma_buf->ops != &tdev->ops)
569 return -ENOSYS;
570
571 prime = (struct ttm_prime_object *) dma_buf->priv;
572 base = &prime->base;
573 *handle = base->hash.key;
574 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
575
576 dma_buf_put(dma_buf);
577
578 return ret;
579}
580EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle);
581
582/**
583 * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
584 *
585 * @tfile: Struct ttm_object_file identifying the caller.
586 * @handle: Handle to the object we're exporting from.
587 * @flags: flags for dma-buf creation. We just pass them on.
588 * @prime_fd: The returned file descriptor.
589 *
590 */
591int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
592 uint32_t handle, uint32_t flags,
593 int *prime_fd)
594{
595 struct ttm_object_device *tdev = tfile->tdev;
596 struct ttm_base_object *base;
597 struct dma_buf *dma_buf;
598 struct ttm_prime_object *prime;
599 int ret;
600
601 base = ttm_base_object_lookup(tfile, handle);
602 if (unlikely(base == NULL ||
603 base->object_type != ttm_prime_type)) {
604 ret = -ENOENT;
605 goto out_unref;
606 }
607
608 prime = container_of(base, struct ttm_prime_object, base);
609 if (unlikely(!base->shareable)) {
610 ret = -EPERM;
611 goto out_unref;
612 }
613
614 ret = mutex_lock_interruptible(&prime->mutex);
615 if (unlikely(ret != 0)) {
616 ret = -ERESTARTSYS;
617 goto out_unref;
618 }
619
620 dma_buf = prime->dma_buf;
621 if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
622
623 /*
624 * Need to create a new dma_buf, with memory accounting.
625 */
626 ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
627 false, true);
628 if (unlikely(ret != 0)) {
629 mutex_unlock(&prime->mutex);
630 goto out_unref;
631 }
632
633 dma_buf = dma_buf_export(prime, &tdev->ops,
634 prime->size, flags);
635 if (IS_ERR(dma_buf)) {
636 ret = PTR_ERR(dma_buf);
637 ttm_mem_global_free(tdev->mem_glob,
638 tdev->dma_buf_size);
639 mutex_unlock(&prime->mutex);
640 goto out_unref;
641 }
642
643 /*
644 * dma_buf has taken the base object reference
645 */
646 base = NULL;
647 prime->dma_buf = dma_buf;
648 }
649 mutex_unlock(&prime->mutex);
650
651 ret = dma_buf_fd(dma_buf, flags);
652 if (ret >= 0) {
653 *prime_fd = ret;
654 ret = 0;
655 } else
656 dma_buf_put(dma_buf);
657
658out_unref:
659 if (base)
660 ttm_base_object_unref(&base);
661 return ret;
662}
663EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd);
664
665/**
666 * ttm_prime_object_init - Initialize a ttm_prime_object
667 *
668 * @tfile: struct ttm_object_file identifying the caller
669 * @size: The size of the dma_bufs we export.
670 * @prime: The object to be initialized.
671 * @shareable: See ttm_base_object_init
672 * @type: See ttm_base_object_init
673 * @refcount_release: See ttm_base_object_init
674 * @ref_obj_release: See ttm_base_object_init
675 *
676 * Initializes an object which is compatible with the drm_prime model
677 * for data sharing between processes and devices.
678 */
679int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
680 struct ttm_prime_object *prime, bool shareable,
681 enum ttm_object_type type,
682 void (*refcount_release) (struct ttm_base_object **),
683 void (*ref_obj_release) (struct ttm_base_object *,
684 enum ttm_ref_type ref_type))
685{
686 mutex_init(&prime->mutex);
687 prime->size = PAGE_ALIGN(size);
688 prime->real_type = type;
689 prime->dma_buf = NULL;
690 prime->refcount_release = refcount_release;
691 return ttm_base_object_init(tfile, &prime->base, shareable,
692 ttm_prime_type,
693 ttm_prime_refcount_release,
694 ref_obj_release);
695}
696EXPORT_SYMBOL(ttm_prime_object_init);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 24ffbe990736..8d67b943ac05 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -125,6 +125,12 @@ static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
125 125
126static void udl_gem_put_pages(struct udl_gem_object *obj) 126static void udl_gem_put_pages(struct udl_gem_object *obj)
127{ 127{
128 if (obj->base.import_attach) {
129 drm_free_large(obj->pages);
130 obj->pages = NULL;
131 return;
132 }
133
128 drm_gem_put_pages(&obj->base, obj->pages, false, false); 134 drm_gem_put_pages(&obj->base, obj->pages, false, false);
129 obj->pages = NULL; 135 obj->pages = NULL;
130} 136}
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 2cc6cd91ac11..9f8b690bcf52 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
6 vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ 6 vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
7 vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ 7 vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
8 vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \ 8 vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
9 vmwgfx_surface.o 9 vmwgfx_surface.o vmwgfx_prime.o
10 10
11obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o 11obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 7776e6f0aef6..0489c6152482 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -150,6 +150,8 @@ struct vmw_ttm_tt {
150 bool mapped; 150 bool mapped;
151}; 151};
152 152
153const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
154
153/** 155/**
154 * Helper functions to advance a struct vmw_piter iterator. 156 * Helper functions to advance a struct vmw_piter iterator.
155 * 157 *
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 20d5485eaf98..c7a549694e59 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -677,7 +677,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
677 } 677 }
678 678
679 dev_priv->tdev = ttm_object_device_init 679 dev_priv->tdev = ttm_object_device_init
680 (dev_priv->mem_global_ref.object, 12); 680 (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
681 681
682 if (unlikely(dev_priv->tdev == NULL)) { 682 if (unlikely(dev_priv->tdev == NULL)) {
683 DRM_ERROR("Unable to initialize TTM object management.\n"); 683 DRM_ERROR("Unable to initialize TTM object management.\n");
@@ -1210,7 +1210,7 @@ static const struct file_operations vmwgfx_driver_fops = {
1210 1210
1211static struct drm_driver driver = { 1211static struct drm_driver driver = {
1212 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 1212 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
1213 DRIVER_MODESET, 1213 DRIVER_MODESET | DRIVER_PRIME,
1214 .load = vmw_driver_load, 1214 .load = vmw_driver_load,
1215 .unload = vmw_driver_unload, 1215 .unload = vmw_driver_unload,
1216 .lastclose = vmw_lastclose, 1216 .lastclose = vmw_lastclose,
@@ -1235,6 +1235,9 @@ static struct drm_driver driver = {
1235 .dumb_map_offset = vmw_dumb_map_offset, 1235 .dumb_map_offset = vmw_dumb_map_offset,
1236 .dumb_destroy = vmw_dumb_destroy, 1236 .dumb_destroy = vmw_dumb_destroy,
1237 1237
1238 .prime_fd_to_handle = vmw_prime_fd_to_handle,
1239 .prime_handle_to_fd = vmw_prime_handle_to_fd,
1240
1238 .fops = &vmwgfx_driver_fops, 1241 .fops = &vmwgfx_driver_fops,
1239 .name = VMWGFX_DRIVER_NAME, 1242 .name = VMWGFX_DRIVER_NAME,
1240 .desc = VMWGFX_DRIVER_DESC, 1243 .desc = VMWGFX_DRIVER_DESC,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e401d5dbcb96..20890ad8408b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -615,6 +615,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
615 * TTM buffer object driver - vmwgfx_buffer.c 615 * TTM buffer object driver - vmwgfx_buffer.c
616 */ 616 */
617 617
618extern const size_t vmw_tt_size;
618extern struct ttm_placement vmw_vram_placement; 619extern struct ttm_placement vmw_vram_placement;
619extern struct ttm_placement vmw_vram_ne_placement; 620extern struct ttm_placement vmw_vram_ne_placement;
620extern struct ttm_placement vmw_vram_sys_placement; 621extern struct ttm_placement vmw_vram_sys_placement;
@@ -819,6 +820,20 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
819extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func; 820extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
820 821
821/** 822/**
823 * Prime - vmwgfx_prime.c
824 */
825
826extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
827extern int vmw_prime_fd_to_handle(struct drm_device *dev,
828 struct drm_file *file_priv,
829 int fd, u32 *handle);
830extern int vmw_prime_handle_to_fd(struct drm_device *dev,
831 struct drm_file *file_priv,
832 uint32_t handle, uint32_t flags,
833 int *prime_fd);
834
835
836/**
822 * Inline helper functions 837 * Inline helper functions
823 */ 838 */
824 839
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ecb3d867b426..03f1c2038631 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -75,6 +75,7 @@ void vmw_display_unit_cleanup(struct vmw_display_unit *du)
75 vmw_surface_unreference(&du->cursor_surface); 75 vmw_surface_unreference(&du->cursor_surface);
76 if (du->cursor_dmabuf) 76 if (du->cursor_dmabuf)
77 vmw_dmabuf_unreference(&du->cursor_dmabuf); 77 vmw_dmabuf_unreference(&du->cursor_dmabuf);
78 drm_sysfs_connector_remove(&du->connector);
78 drm_crtc_cleanup(&du->crtc); 79 drm_crtc_cleanup(&du->crtc);
79 drm_encoder_cleanup(&du->encoder); 80 drm_encoder_cleanup(&du->encoder);
80 drm_connector_cleanup(&du->connector); 81 drm_connector_cleanup(&du->connector);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 79f7e8e60529..a055a26819c2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -260,6 +260,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
260 connector->encoder = NULL; 260 connector->encoder = NULL;
261 encoder->crtc = NULL; 261 encoder->crtc = NULL;
262 crtc->fb = NULL; 262 crtc->fb = NULL;
263 crtc->enabled = false;
263 264
264 vmw_ldu_del_active(dev_priv, ldu); 265 vmw_ldu_del_active(dev_priv, ldu);
265 266
@@ -285,6 +286,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
285 crtc->x = set->x; 286 crtc->x = set->x;
286 crtc->y = set->y; 287 crtc->y = set->y;
287 crtc->mode = *mode; 288 crtc->mode = *mode;
289 crtc->enabled = true;
288 290
289 vmw_ldu_add_active(dev_priv, ldu, vfb); 291 vmw_ldu_add_active(dev_priv, ldu, vfb);
290 292
@@ -369,6 +371,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
369 encoder->possible_crtcs = (1 << unit); 371 encoder->possible_crtcs = (1 << unit);
370 encoder->possible_clones = 0; 372 encoder->possible_clones = 0;
371 373
374 (void) drm_sysfs_connector_add(connector);
375
372 drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); 376 drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
373 377
374 drm_mode_crtc_set_gamma_size(crtc, 256); 378 drm_mode_crtc_set_gamma_size(crtc, 256);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
new file mode 100644
index 000000000000..31fe32d8d65a
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -0,0 +1,137 @@
1/**************************************************************************
2 *
3 * Copyright © 2013 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors:
29 * Thomas Hellstrom <thellstrom@vmware.com>
30 *
31 */
32
33#include "vmwgfx_drv.h"
34#include <linux/dma-buf.h>
35#include <drm/ttm/ttm_object.h>
36
37/*
38 * DMA-BUF attach- and mapping methods. No need to implement
39 * these until we have other virtual devices use them.
40 */
41
42static int vmw_prime_map_attach(struct dma_buf *dma_buf,
43 struct device *target_dev,
44 struct dma_buf_attachment *attach)
45{
46 return -ENOSYS;
47}
48
49static void vmw_prime_map_detach(struct dma_buf *dma_buf,
50 struct dma_buf_attachment *attach)
51{
52}
53
54static struct sg_table *vmw_prime_map_dma_buf(struct dma_buf_attachment *attach,
55 enum dma_data_direction dir)
56{
57 return ERR_PTR(-ENOSYS);
58}
59
60static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
61 struct sg_table *sgb,
62 enum dma_data_direction dir)
63{
64}
65
66static void *vmw_prime_dmabuf_vmap(struct dma_buf *dma_buf)
67{
68 return NULL;
69}
70
71static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
72{
73}
74
75static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
76 unsigned long page_num)
77{
78 return NULL;
79}
80
81static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
82 unsigned long page_num, void *addr)
83{
84
85}
86static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
87 unsigned long page_num)
88{
89 return NULL;
90}
91
92static void vmw_prime_dmabuf_kunmap(struct dma_buf *dma_buf,
93 unsigned long page_num, void *addr)
94{
95
96}
97
98static int vmw_prime_dmabuf_mmap(struct dma_buf *dma_buf,
99 struct vm_area_struct *vma)
100{
101 WARN_ONCE(true, "Attempted use of dmabuf mmap. Bad.\n");
102 return -ENOSYS;
103}
104
105const struct dma_buf_ops vmw_prime_dmabuf_ops = {
106 .attach = vmw_prime_map_attach,
107 .detach = vmw_prime_map_detach,
108 .map_dma_buf = vmw_prime_map_dma_buf,
109 .unmap_dma_buf = vmw_prime_unmap_dma_buf,
110 .release = NULL,
111 .kmap = vmw_prime_dmabuf_kmap,
112 .kmap_atomic = vmw_prime_dmabuf_kmap_atomic,
113 .kunmap = vmw_prime_dmabuf_kunmap,
114 .kunmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
115 .mmap = vmw_prime_dmabuf_mmap,
116 .vmap = vmw_prime_dmabuf_vmap,
117 .vunmap = vmw_prime_dmabuf_vunmap,
118};
119
120int vmw_prime_fd_to_handle(struct drm_device *dev,
121 struct drm_file *file_priv,
122 int fd, u32 *handle)
123{
124 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
125
126 return ttm_prime_fd_to_handle(tfile, fd, handle);
127}
128
129int vmw_prime_handle_to_fd(struct drm_device *dev,
130 struct drm_file *file_priv,
131 uint32_t handle, uint32_t flags,
132 int *prime_fd)
133{
134 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
135
136 return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
137}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 252501a54def..9b5ea2ac7ddf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -35,7 +35,7 @@
35#define VMW_RES_EVICT_ERR_COUNT 10 35#define VMW_RES_EVICT_ERR_COUNT 10
36 36
37struct vmw_user_dma_buffer { 37struct vmw_user_dma_buffer {
38 struct ttm_base_object base; 38 struct ttm_prime_object prime;
39 struct vmw_dma_buffer dma; 39 struct vmw_dma_buffer dma;
40}; 40};
41 41
@@ -297,7 +297,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
297 if (unlikely(base == NULL)) 297 if (unlikely(base == NULL))
298 return -EINVAL; 298 return -EINVAL;
299 299
300 if (unlikely(base->object_type != converter->object_type)) 300 if (unlikely(ttm_base_object_type(base) != converter->object_type))
301 goto out_bad_resource; 301 goto out_bad_resource;
302 302
303 res = converter->base_obj_to_res(base); 303 res = converter->base_obj_to_res(base);
@@ -352,6 +352,38 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
352/** 352/**
353 * Buffer management. 353 * Buffer management.
354 */ 354 */
355
356/**
357 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
358 *
359 * @dev_priv: Pointer to a struct vmw_private identifying the device.
360 * @size: The requested buffer size.
361 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
362 */
363static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
364 bool user)
365{
366 static size_t struct_size, user_struct_size;
367 size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
368 size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
369
370 if (unlikely(struct_size == 0)) {
371 size_t backend_size = ttm_round_pot(vmw_tt_size);
372
373 struct_size = backend_size +
374 ttm_round_pot(sizeof(struct vmw_dma_buffer));
375 user_struct_size = backend_size +
376 ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
377 }
378
379 if (dev_priv->map_mode == vmw_dma_alloc_coherent)
380 page_array_size +=
381 ttm_round_pot(num_pages * sizeof(dma_addr_t));
382
383 return ((user) ? user_struct_size : struct_size) +
384 page_array_size;
385}
386
355void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) 387void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
356{ 388{
357 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); 389 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
@@ -359,6 +391,13 @@ void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
359 kfree(vmw_bo); 391 kfree(vmw_bo);
360} 392}
361 393
394static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
395{
396 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
397
398 ttm_prime_object_kfree(vmw_user_bo, prime);
399}
400
362int vmw_dmabuf_init(struct vmw_private *dev_priv, 401int vmw_dmabuf_init(struct vmw_private *dev_priv,
363 struct vmw_dma_buffer *vmw_bo, 402 struct vmw_dma_buffer *vmw_bo,
364 size_t size, struct ttm_placement *placement, 403 size_t size, struct ttm_placement *placement,
@@ -368,28 +407,23 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
368 struct ttm_bo_device *bdev = &dev_priv->bdev; 407 struct ttm_bo_device *bdev = &dev_priv->bdev;
369 size_t acc_size; 408 size_t acc_size;
370 int ret; 409 int ret;
410 bool user = (bo_free == &vmw_user_dmabuf_destroy);
371 411
372 BUG_ON(!bo_free); 412 BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
373 413
374 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer)); 414 acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
375 memset(vmw_bo, 0, sizeof(*vmw_bo)); 415 memset(vmw_bo, 0, sizeof(*vmw_bo));
376 416
377 INIT_LIST_HEAD(&vmw_bo->res_list); 417 INIT_LIST_HEAD(&vmw_bo->res_list);
378 418
379 ret = ttm_bo_init(bdev, &vmw_bo->base, size, 419 ret = ttm_bo_init(bdev, &vmw_bo->base, size,
380 ttm_bo_type_device, placement, 420 (user) ? ttm_bo_type_device :
421 ttm_bo_type_kernel, placement,
381 0, interruptible, 422 0, interruptible,
382 NULL, acc_size, NULL, bo_free); 423 NULL, acc_size, NULL, bo_free);
383 return ret; 424 return ret;
384} 425}
385 426
386static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
387{
388 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
389
390 ttm_base_object_kfree(vmw_user_bo, base);
391}
392
393static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) 427static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
394{ 428{
395 struct vmw_user_dma_buffer *vmw_user_bo; 429 struct vmw_user_dma_buffer *vmw_user_bo;
@@ -401,7 +435,8 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
401 if (unlikely(base == NULL)) 435 if (unlikely(base == NULL))
402 return; 436 return;
403 437
404 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); 438 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
439 prime.base);
405 bo = &vmw_user_bo->dma.base; 440 bo = &vmw_user_bo->dma.base;
406 ttm_bo_unref(&bo); 441 ttm_bo_unref(&bo);
407} 442}
@@ -442,18 +477,19 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
442 return ret; 477 return ret;
443 478
444 tmp = ttm_bo_reference(&user_bo->dma.base); 479 tmp = ttm_bo_reference(&user_bo->dma.base);
445 ret = ttm_base_object_init(tfile, 480 ret = ttm_prime_object_init(tfile,
446 &user_bo->base, 481 size,
447 shareable, 482 &user_bo->prime,
448 ttm_buffer_type, 483 shareable,
449 &vmw_user_dmabuf_release, NULL); 484 ttm_buffer_type,
485 &vmw_user_dmabuf_release, NULL);
450 if (unlikely(ret != 0)) { 486 if (unlikely(ret != 0)) {
451 ttm_bo_unref(&tmp); 487 ttm_bo_unref(&tmp);
452 goto out_no_base_object; 488 goto out_no_base_object;
453 } 489 }
454 490
455 *p_dma_buf = &user_bo->dma; 491 *p_dma_buf = &user_bo->dma;
456 *handle = user_bo->base.hash.key; 492 *handle = user_bo->prime.base.hash.key;
457 493
458out_no_base_object: 494out_no_base_object:
459 return ret; 495 return ret;
@@ -475,8 +511,8 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
475 return -EPERM; 511 return -EPERM;
476 512
477 vmw_user_bo = vmw_user_dma_buffer(bo); 513 vmw_user_bo = vmw_user_dma_buffer(bo);
478 return (vmw_user_bo->base.tfile == tfile || 514 return (vmw_user_bo->prime.base.tfile == tfile ||
479 vmw_user_bo->base.shareable) ? 0 : -EPERM; 515 vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
480} 516}
481 517
482int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, 518int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
@@ -538,14 +574,15 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
538 return -ESRCH; 574 return -ESRCH;
539 } 575 }
540 576
541 if (unlikely(base->object_type != ttm_buffer_type)) { 577 if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
542 ttm_base_object_unref(&base); 578 ttm_base_object_unref(&base);
543 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n", 579 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
544 (unsigned long)handle); 580 (unsigned long)handle);
545 return -EINVAL; 581 return -EINVAL;
546 } 582 }
547 583
548 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); 584 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
585 prime.base);
549 (void)ttm_bo_reference(&vmw_user_bo->dma.base); 586 (void)ttm_bo_reference(&vmw_user_bo->dma.base);
550 ttm_base_object_unref(&base); 587 ttm_base_object_unref(&base);
551 *out = &vmw_user_bo->dma; 588 *out = &vmw_user_bo->dma;
@@ -562,7 +599,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
562 return -EINVAL; 599 return -EINVAL;
563 600
564 user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); 601 user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
565 return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL); 602 return ttm_ref_object_add(tfile, &user_bo->prime.base,
603 TTM_REF_USAGE, NULL);
566} 604}
567 605
568/* 606/*
@@ -777,53 +815,55 @@ err_ref:
777} 815}
778 816
779 817
818/**
819 * vmw_dumb_create - Create a dumb kms buffer
820 *
821 * @file_priv: Pointer to a struct drm_file identifying the caller.
822 * @dev: Pointer to the drm device.
823 * @args: Pointer to a struct drm_mode_create_dumb structure
824 *
825 * This is a driver callback for the core drm create_dumb functionality.
826 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
827 * that the arguments have a different format.
828 */
780int vmw_dumb_create(struct drm_file *file_priv, 829int vmw_dumb_create(struct drm_file *file_priv,
781 struct drm_device *dev, 830 struct drm_device *dev,
782 struct drm_mode_create_dumb *args) 831 struct drm_mode_create_dumb *args)
783{ 832{
784 struct vmw_private *dev_priv = vmw_priv(dev); 833 struct vmw_private *dev_priv = vmw_priv(dev);
785 struct vmw_master *vmaster = vmw_master(file_priv->master); 834 struct vmw_master *vmaster = vmw_master(file_priv->master);
786 struct vmw_user_dma_buffer *vmw_user_bo; 835 struct vmw_dma_buffer *dma_buf;
787 struct ttm_buffer_object *tmp;
788 int ret; 836 int ret;
789 837
790 args->pitch = args->width * ((args->bpp + 7) / 8); 838 args->pitch = args->width * ((args->bpp + 7) / 8);
791 args->size = args->pitch * args->height; 839 args->size = args->pitch * args->height;
792 840
793 vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
794 if (vmw_user_bo == NULL)
795 return -ENOMEM;
796
797 ret = ttm_read_lock(&vmaster->lock, true); 841 ret = ttm_read_lock(&vmaster->lock, true);
798 if (ret != 0) { 842 if (unlikely(ret != 0))
799 kfree(vmw_user_bo);
800 return ret; 843 return ret;
801 }
802 844
803 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size, 845 ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
804 &vmw_vram_sys_placement, true, 846 args->size, false, &args->handle,
805 &vmw_user_dmabuf_destroy); 847 &dma_buf);
806 if (ret != 0)
807 goto out_no_dmabuf;
808
809 tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
810 ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
811 &vmw_user_bo->base,
812 false,
813 ttm_buffer_type,
814 &vmw_user_dmabuf_release, NULL);
815 if (unlikely(ret != 0)) 848 if (unlikely(ret != 0))
816 goto out_no_base_object; 849 goto out_no_dmabuf;
817
818 args->handle = vmw_user_bo->base.hash.key;
819 850
820out_no_base_object: 851 vmw_dmabuf_unreference(&dma_buf);
821 ttm_bo_unref(&tmp);
822out_no_dmabuf: 852out_no_dmabuf:
823 ttm_read_unlock(&vmaster->lock); 853 ttm_read_unlock(&vmaster->lock);
824 return ret; 854 return ret;
825} 855}
826 856
857/**
858 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
859 *
860 * @file_priv: Pointer to a struct drm_file identifying the caller.
861 * @dev: Pointer to the drm device.
862 * @handle: Handle identifying the dumb buffer.
863 * @offset: The address space offset returned.
864 *
865 * This is a driver callback for the core drm dumb_map_offset functionality.
866 */
827int vmw_dumb_map_offset(struct drm_file *file_priv, 867int vmw_dumb_map_offset(struct drm_file *file_priv,
828 struct drm_device *dev, uint32_t handle, 868 struct drm_device *dev, uint32_t handle,
829 uint64_t *offset) 869 uint64_t *offset)
@@ -841,6 +881,15 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
841 return 0; 881 return 0;
842} 882}
843 883
884/**
885 * vmw_dumb_destroy - Destroy a dumb boffer
886 *
887 * @file_priv: Pointer to a struct drm_file identifying the caller.
888 * @dev: Pointer to the drm device.
889 * @handle: Handle identifying the dumb buffer.
890 *
891 * This is a driver callback for the core drm dumb_destroy functionality.
892 */
844int vmw_dumb_destroy(struct drm_file *file_priv, 893int vmw_dumb_destroy(struct drm_file *file_priv,
845 struct drm_device *dev, 894 struct drm_device *dev,
846 uint32_t handle) 895 uint32_t handle)
@@ -994,7 +1043,6 @@ void vmw_resource_unreserve(struct vmw_resource *res,
994 */ 1043 */
995static int 1044static int
996vmw_resource_check_buffer(struct vmw_resource *res, 1045vmw_resource_check_buffer(struct vmw_resource *res,
997 struct ww_acquire_ctx *ticket,
998 bool interruptible, 1046 bool interruptible,
999 struct ttm_validate_buffer *val_buf) 1047 struct ttm_validate_buffer *val_buf)
1000{ 1048{
@@ -1011,7 +1059,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
1011 INIT_LIST_HEAD(&val_list); 1059 INIT_LIST_HEAD(&val_list);
1012 val_buf->bo = ttm_bo_reference(&res->backup->base); 1060 val_buf->bo = ttm_bo_reference(&res->backup->base);
1013 list_add_tail(&val_buf->head, &val_list); 1061 list_add_tail(&val_buf->head, &val_list);
1014 ret = ttm_eu_reserve_buffers(ticket, &val_list); 1062 ret = ttm_eu_reserve_buffers(NULL, &val_list);
1015 if (unlikely(ret != 0)) 1063 if (unlikely(ret != 0))
1016 goto out_no_reserve; 1064 goto out_no_reserve;
1017 1065
@@ -1029,7 +1077,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
1029 return 0; 1077 return 0;
1030 1078
1031out_no_validate: 1079out_no_validate:
1032 ttm_eu_backoff_reservation(ticket, &val_list); 1080 ttm_eu_backoff_reservation(NULL, &val_list);
1033out_no_reserve: 1081out_no_reserve:
1034 ttm_bo_unref(&val_buf->bo); 1082 ttm_bo_unref(&val_buf->bo);
1035 if (backup_dirty) 1083 if (backup_dirty)
@@ -1074,8 +1122,7 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
1074 * @val_buf: Backup buffer information. 1122 * @val_buf: Backup buffer information.
1075 */ 1123 */
1076static void 1124static void
1077vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket, 1125vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
1078 struct ttm_validate_buffer *val_buf)
1079{ 1126{
1080 struct list_head val_list; 1127 struct list_head val_list;
1081 1128
@@ -1084,7 +1131,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
1084 1131
1085 INIT_LIST_HEAD(&val_list); 1132 INIT_LIST_HEAD(&val_list);
1086 list_add_tail(&val_buf->head, &val_list); 1133 list_add_tail(&val_buf->head, &val_list);
1087 ttm_eu_backoff_reservation(ticket, &val_list); 1134 ttm_eu_backoff_reservation(NULL, &val_list);
1088 ttm_bo_unref(&val_buf->bo); 1135 ttm_bo_unref(&val_buf->bo);
1089} 1136}
1090 1137
@@ -1099,14 +1146,12 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
1099{ 1146{
1100 struct ttm_validate_buffer val_buf; 1147 struct ttm_validate_buffer val_buf;
1101 const struct vmw_res_func *func = res->func; 1148 const struct vmw_res_func *func = res->func;
1102 struct ww_acquire_ctx ticket;
1103 int ret; 1149 int ret;
1104 1150
1105 BUG_ON(!func->may_evict); 1151 BUG_ON(!func->may_evict);
1106 1152
1107 val_buf.bo = NULL; 1153 val_buf.bo = NULL;
1108 ret = vmw_resource_check_buffer(res, &ticket, interruptible, 1154 ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
1109 &val_buf);
1110 if (unlikely(ret != 0)) 1155 if (unlikely(ret != 0))
1111 return ret; 1156 return ret;
1112 1157
@@ -1121,7 +1166,7 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
1121 res->backup_dirty = true; 1166 res->backup_dirty = true;
1122 res->res_dirty = false; 1167 res->res_dirty = false;
1123out_no_unbind: 1168out_no_unbind:
1124 vmw_resource_backoff_reservation(&ticket, &val_buf); 1169 vmw_resource_backoff_reservation(&val_buf);
1125 1170
1126 return ret; 1171 return ret;
1127} 1172}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 26387c3d5a21..22406c8651ea 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -310,6 +310,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
310 crtc->fb = NULL; 310 crtc->fb = NULL;
311 crtc->x = 0; 311 crtc->x = 0;
312 crtc->y = 0; 312 crtc->y = 0;
313 crtc->enabled = false;
313 314
314 vmw_sou_del_active(dev_priv, sou); 315 vmw_sou_del_active(dev_priv, sou);
315 316
@@ -370,6 +371,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
370 crtc->fb = NULL; 371 crtc->fb = NULL;
371 crtc->x = 0; 372 crtc->x = 0;
372 crtc->y = 0; 373 crtc->y = 0;
374 crtc->enabled = false;
373 375
374 return ret; 376 return ret;
375 } 377 }
@@ -382,6 +384,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
382 crtc->fb = fb; 384 crtc->fb = fb;
383 crtc->x = set->x; 385 crtc->x = set->x;
384 crtc->y = set->y; 386 crtc->y = set->y;
387 crtc->enabled = true;
385 388
386 return 0; 389 return 0;
387} 390}
@@ -464,6 +467,8 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
464 encoder->possible_crtcs = (1 << unit); 467 encoder->possible_crtcs = (1 << unit);
465 encoder->possible_clones = 0; 468 encoder->possible_clones = 0;
466 469
470 (void) drm_sysfs_connector_add(connector);
471
467 drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs); 472 drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
468 473
469 drm_mode_crtc_set_gamma_size(crtc, 256); 474 drm_mode_crtc_set_gamma_size(crtc, 256);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 582814339748..7de2ea8bd553 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -38,7 +38,7 @@
38 * @size: TTM accounting size for the surface. 38 * @size: TTM accounting size for the surface.
39 */ 39 */
40struct vmw_user_surface { 40struct vmw_user_surface {
41 struct ttm_base_object base; 41 struct ttm_prime_object prime;
42 struct vmw_surface srf; 42 struct vmw_surface srf;
43 uint32_t size; 43 uint32_t size;
44 uint32_t backup_handle; 44 uint32_t backup_handle;
@@ -580,7 +580,8 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
580static struct vmw_resource * 580static struct vmw_resource *
581vmw_user_surface_base_to_res(struct ttm_base_object *base) 581vmw_user_surface_base_to_res(struct ttm_base_object *base)
582{ 582{
583 return &(container_of(base, struct vmw_user_surface, base)->srf.res); 583 return &(container_of(base, struct vmw_user_surface,
584 prime.base)->srf.res);
584} 585}
585 586
586/** 587/**
@@ -599,7 +600,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
599 kfree(srf->offsets); 600 kfree(srf->offsets);
600 kfree(srf->sizes); 601 kfree(srf->sizes);
601 kfree(srf->snooper.image); 602 kfree(srf->snooper.image);
602 ttm_base_object_kfree(user_srf, base); 603 ttm_prime_object_kfree(user_srf, prime);
603 ttm_mem_global_free(vmw_mem_glob(dev_priv), size); 604 ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
604} 605}
605 606
@@ -616,7 +617,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
616{ 617{
617 struct ttm_base_object *base = *p_base; 618 struct ttm_base_object *base = *p_base;
618 struct vmw_user_surface *user_srf = 619 struct vmw_user_surface *user_srf =
619 container_of(base, struct vmw_user_surface, base); 620 container_of(base, struct vmw_user_surface, prime.base);
620 struct vmw_resource *res = &user_srf->srf.res; 621 struct vmw_resource *res = &user_srf->srf.res;
621 622
622 *p_base = NULL; 623 *p_base = NULL;
@@ -790,8 +791,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
790 } 791 }
791 srf->snooper.crtc = NULL; 792 srf->snooper.crtc = NULL;
792 793
793 user_srf->base.shareable = false; 794 user_srf->prime.base.shareable = false;
794 user_srf->base.tfile = NULL; 795 user_srf->prime.base.tfile = NULL;
795 796
796 /** 797 /**
797 * From this point, the generic resource management functions 798 * From this point, the generic resource management functions
@@ -803,9 +804,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
803 goto out_unlock; 804 goto out_unlock;
804 805
805 tmp = vmw_resource_reference(&srf->res); 806 tmp = vmw_resource_reference(&srf->res);
806 ret = ttm_base_object_init(tfile, &user_srf->base, 807 ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
807 req->shareable, VMW_RES_SURFACE, 808 req->shareable, VMW_RES_SURFACE,
808 &vmw_user_surface_base_release, NULL); 809 &vmw_user_surface_base_release, NULL);
809 810
810 if (unlikely(ret != 0)) { 811 if (unlikely(ret != 0)) {
811 vmw_resource_unreference(&tmp); 812 vmw_resource_unreference(&tmp);
@@ -813,7 +814,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
813 goto out_unlock; 814 goto out_unlock;
814 } 815 }
815 816
816 rep->sid = user_srf->base.hash.key; 817 rep->sid = user_srf->prime.base.hash.key;
817 vmw_resource_unreference(&res); 818 vmw_resource_unreference(&res);
818 819
819 ttm_read_unlock(&vmaster->lock); 820 ttm_read_unlock(&vmaster->lock);
@@ -823,7 +824,7 @@ out_no_copy:
823out_no_offsets: 824out_no_offsets:
824 kfree(srf->sizes); 825 kfree(srf->sizes);
825out_no_sizes: 826out_no_sizes:
826 ttm_base_object_kfree(user_srf, base); 827 ttm_prime_object_kfree(user_srf, prime);
827out_no_user_srf: 828out_no_user_srf:
828 ttm_mem_global_free(vmw_mem_glob(dev_priv), size); 829 ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
829out_unlock: 830out_unlock:
@@ -859,13 +860,14 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
859 return -EINVAL; 860 return -EINVAL;
860 } 861 }
861 862
862 if (unlikely(base->object_type != VMW_RES_SURFACE)) 863 if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
863 goto out_bad_resource; 864 goto out_bad_resource;
864 865
865 user_srf = container_of(base, struct vmw_user_surface, base); 866 user_srf = container_of(base, struct vmw_user_surface, prime.base);
866 srf = &user_srf->srf; 867 srf = &user_srf->srf;
867 868
868 ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL); 869 ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
870 TTM_REF_USAGE, NULL);
869 if (unlikely(ret != 0)) { 871 if (unlikely(ret != 0)) {
870 DRM_ERROR("Could not add a reference to a surface.\n"); 872 DRM_ERROR("Could not add a reference to a surface.\n");
871 goto out_no_reference; 873 goto out_no_reference;
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 509383f8be03..6a929591aa73 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -19,6 +19,7 @@
19#include <linux/of.h> 19#include <linux/of.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21 21
22#include "bus.h"
22#include "dev.h" 23#include "dev.h"
23 24
24static DEFINE_MUTEX(clients_lock); 25static DEFINE_MUTEX(clients_lock);
@@ -257,7 +258,7 @@ static int host1x_unregister_client(struct host1x *host1x,
257 return -ENODEV; 258 return -ENODEV;
258} 259}
259 260
260struct bus_type host1x_bus_type = { 261static struct bus_type host1x_bus_type = {
261 .name = "host1x", 262 .name = "host1x",
262}; 263};
263 264
@@ -301,7 +302,7 @@ static int host1x_device_add(struct host1x *host1x,
301 device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask; 302 device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
302 device->dev.dma_mask = &device->dev.coherent_dma_mask; 303 device->dev.dma_mask = &device->dev.coherent_dma_mask;
303 device->dev.release = host1x_device_release; 304 device->dev.release = host1x_device_release;
304 dev_set_name(&device->dev, driver->name); 305 dev_set_name(&device->dev, "%s", driver->name);
305 device->dev.bus = &host1x_bus_type; 306 device->dev.bus = &host1x_bus_type;
306 device->dev.parent = host1x->dev; 307 device->dev.parent = host1x->dev;
307 308
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 37e2a63241a9..6b09b71940c2 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -54,8 +54,8 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
54 u32 *p = (u32 *)((u32)pb->mapped + getptr); 54 u32 *p = (u32 *)((u32)pb->mapped + getptr);
55 *(p++) = HOST1X_OPCODE_NOP; 55 *(p++) = HOST1X_OPCODE_NOP;
56 *(p++) = HOST1X_OPCODE_NOP; 56 *(p++) = HOST1X_OPCODE_NOP;
57 dev_dbg(host1x->dev, "%s: NOP at 0x%x\n", __func__, 57 dev_dbg(host1x->dev, "%s: NOP at %#llx\n", __func__,
58 pb->phys + getptr); 58 (u64)pb->phys + getptr);
59 getptr = (getptr + 8) & (pb->size_bytes - 1); 59 getptr = (getptr + 8) & (pb->size_bytes - 1);
60 } 60 }
61 wmb(); 61 wmb();
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index 640c75ca5a8b..f72c873eff81 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -163,8 +163,8 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
163 continue; 163 continue;
164 } 164 }
165 165
166 host1x_debug_output(o, " GATHER at %08x+%04x, %d words\n", 166 host1x_debug_output(o, " GATHER at %#llx+%04x, %d words\n",
167 g->base, g->offset, g->words); 167 (u64)g->base, g->offset, g->words);
168 168
169 show_gather(o, g->base + g->offset, g->words, cdma, 169 show_gather(o, g->base + g->offset, g->words, cdma,
170 g->base, mapped); 170 g->base, mapped);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 329fbb9b5976..34e2d39d4ce8 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -460,6 +460,7 @@ config HID_MULTITOUCH
460 - Stantum multitouch panels 460 - Stantum multitouch panels
461 - Touch International Panels 461 - Touch International Panels
462 - Unitec Panels 462 - Unitec Panels
463 - Wistron optical touch panels
463 - XAT optical touch panels 464 - XAT optical touch panels
464 - Xiroku optical touch panels 465 - Xiroku optical touch panels
465 - Zytronic touch panels 466 - Zytronic touch panels
diff --git a/drivers/hid/hid-appleir.c b/drivers/hid/hid-appleir.c
index a42e6a394c5e..0e6a42d37eb6 100644
--- a/drivers/hid/hid-appleir.c
+++ b/drivers/hid/hid-appleir.c
@@ -297,6 +297,9 @@ static int appleir_probe(struct hid_device *hid, const struct hid_device_id *id)
297 297
298 appleir->hid = hid; 298 appleir->hid = hid;
299 299
300 /* force input as some remotes bypass the input registration */
301 hid->quirks |= HID_QUIRK_HIDINPUT_FORCE;
302
300 spin_lock_init(&appleir->lock); 303 spin_lock_init(&appleir->lock);
301 setup_timer(&appleir->key_up_timer, 304 setup_timer(&appleir->key_up_timer,
302 key_up_tick, (unsigned long) appleir); 305 key_up_tick, (unsigned long) appleir);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 8c10f2742233..253fe23ef7fe 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1723,6 +1723,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1723 { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, 1723 { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
1724 { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) }, 1724 { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
1725 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, 1725 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
1726 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) },
1726 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) }, 1727 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
1727 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) }, 1728 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
1728 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) }, 1729 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
@@ -1879,7 +1880,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
1879 1880
1880 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) }, 1881 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
1881 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) }, 1882 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
1882 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
1883 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, 1883 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
1884 { } 1884 { }
1885}; 1885};
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 76559629568c..f9304cb37154 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -489,6 +489,7 @@
489#define USB_VENDOR_ID_KYE 0x0458 489#define USB_VENDOR_ID_KYE 0x0458
490#define USB_DEVICE_ID_KYE_ERGO_525V 0x0087 490#define USB_DEVICE_ID_KYE_ERGO_525V 0x0087
491#define USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE 0x0138 491#define USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE 0x0138
492#define USB_DEVICE_ID_GENIUS_MANTICORE 0x0153
492#define USB_DEVICE_ID_GENIUS_GX_IMPERATOR 0x4018 493#define USB_DEVICE_ID_GENIUS_GX_IMPERATOR 0x4018
493#define USB_DEVICE_ID_KYE_GPEN_560 0x5003 494#define USB_DEVICE_ID_KYE_GPEN_560 0x5003
494#define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010 495#define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010
@@ -640,7 +641,6 @@
640#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003 641#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
641 642
642#define USB_VENDOR_ID_NINTENDO 0x057e 643#define USB_VENDOR_ID_NINTENDO 0x057e
643#define USB_VENDOR_ID_NINTENDO2 0x054c
644#define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306 644#define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306
645#define USB_DEVICE_ID_NINTENDO_WIIMOTE2 0x0330 645#define USB_DEVICE_ID_NINTENDO_WIIMOTE2 0x0330
646 646
@@ -902,6 +902,9 @@
902#define USB_DEVICE_ID_SUPER_DUAL_BOX_PRO 0x8802 902#define USB_DEVICE_ID_SUPER_DUAL_BOX_PRO 0x8802
903#define USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO 0x8804 903#define USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO 0x8804
904 904
905#define USB_VENDOR_ID_WISTRON 0x0fb8
906#define USB_DEVICE_ID_WISTRON_OPTICAL_TOUCH 0x1109
907
905#define USB_VENDOR_ID_X_TENSIONS 0x1ae7 908#define USB_VENDOR_ID_X_TENSIONS 0x1ae7
906#define USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE 0x9001 909#define USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE 0x9001
907 910
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index 73845120295e..e77696367591 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -342,6 +342,10 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
342 rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83, 342 rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83,
343 "Genius Gx Imperator Keyboard"); 343 "Genius Gx Imperator Keyboard");
344 break; 344 break;
345 case USB_DEVICE_ID_GENIUS_MANTICORE:
346 rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
347 "Genius Manticore Keyboard");
348 break;
345 } 349 }
346 return rdesc; 350 return rdesc;
347} 351}
@@ -418,6 +422,14 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
418 goto enabling_err; 422 goto enabling_err;
419 } 423 }
420 break; 424 break;
425 case USB_DEVICE_ID_GENIUS_MANTICORE:
426 /*
427 * The manticore keyboard needs to have all the interfaces
428 * opened at least once to be fully functional.
429 */
430 if (hid_hw_open(hdev))
431 hid_hw_close(hdev);
432 break;
421 } 433 }
422 434
423 return 0; 435 return 0;
@@ -439,6 +451,8 @@ static const struct hid_device_id kye_devices[] = {
439 USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, 451 USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
440 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, 452 { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
441 USB_DEVICE_ID_GENIUS_GX_IMPERATOR) }, 453 USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
454 { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
455 USB_DEVICE_ID_GENIUS_MANTICORE) },
442 { } 456 { }
443}; 457};
444MODULE_DEVICE_TABLE(hid, kye_devices); 458MODULE_DEVICE_TABLE(hid, kye_devices);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index a2cedb8ae1c0..d83b1e8b505b 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1335,6 +1335,12 @@ static const struct hid_device_id mt_devices[] = {
1335 { .driver_data = MT_CLS_NSMU, 1335 { .driver_data = MT_CLS_NSMU,
1336 MT_USB_DEVICE(USB_VENDOR_ID_UNITEC, 1336 MT_USB_DEVICE(USB_VENDOR_ID_UNITEC,
1337 USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) }, 1337 USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) },
1338
1339 /* Wistron panels */
1340 { .driver_data = MT_CLS_NSMU,
1341 MT_USB_DEVICE(USB_VENDOR_ID_WISTRON,
1342 USB_DEVICE_ID_WISTRON_OPTICAL_TOUCH) },
1343
1338 /* XAT */ 1344 /* XAT */
1339 { .driver_data = MT_CLS_NSMU, 1345 { .driver_data = MT_CLS_NSMU,
1340 MT_USB_DEVICE(USB_VENDOR_ID_XAT, 1346 MT_USB_DEVICE(USB_VENDOR_ID_XAT,
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index a184e1921c11..8fab82829f8b 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -112,13 +112,15 @@ static int sensor_hub_get_physical_device_count(
112 112
113static void sensor_hub_fill_attr_info( 113static void sensor_hub_fill_attr_info(
114 struct hid_sensor_hub_attribute_info *info, 114 struct hid_sensor_hub_attribute_info *info,
115 s32 index, s32 report_id, s32 units, s32 unit_expo, s32 size) 115 s32 index, s32 report_id, struct hid_field *field)
116{ 116{
117 info->index = index; 117 info->index = index;
118 info->report_id = report_id; 118 info->report_id = report_id;
119 info->units = units; 119 info->units = field->unit;
120 info->unit_expo = unit_expo; 120 info->unit_expo = field->unit_exponent;
121 info->size = size/8; 121 info->size = (field->report_size * field->report_count)/8;
122 info->logical_minimum = field->logical_minimum;
123 info->logical_maximum = field->logical_maximum;
122} 124}
123 125
124static struct hid_sensor_hub_callbacks *sensor_hub_get_callback( 126static struct hid_sensor_hub_callbacks *sensor_hub_get_callback(
@@ -325,9 +327,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
325 if (field->physical == usage_id && 327 if (field->physical == usage_id &&
326 field->logical == attr_usage_id) { 328 field->logical == attr_usage_id) {
327 sensor_hub_fill_attr_info(info, i, report->id, 329 sensor_hub_fill_attr_info(info, i, report->id,
328 field->unit, field->unit_exponent, 330 field);
329 field->report_size *
330 field->report_count);
331 ret = 0; 331 ret = 0;
332 } else { 332 } else {
333 for (j = 0; j < field->maxusage; ++j) { 333 for (j = 0; j < field->maxusage; ++j) {
@@ -336,11 +336,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
336 field->usage[j].collection_index == 336 field->usage[j].collection_index ==
337 collection_index) { 337 collection_index) {
338 sensor_hub_fill_attr_info(info, 338 sensor_hub_fill_attr_info(info,
339 i, report->id, 339 i, report->id, field);
340 field->unit,
341 field->unit_exponent,
342 field->report_size *
343 field->report_count);
344 ret = 0; 340 ret = 0;
345 break; 341 break;
346 } 342 }
@@ -573,6 +569,8 @@ static int sensor_hub_probe(struct hid_device *hdev,
573 goto err_free_names; 569 goto err_free_names;
574 } 570 }
575 sd->hid_sensor_hub_client_devs[ 571 sd->hid_sensor_hub_client_devs[
572 sd->hid_sensor_client_cnt].id = PLATFORM_DEVID_AUTO;
573 sd->hid_sensor_hub_client_devs[
576 sd->hid_sensor_client_cnt].name = name; 574 sd->hid_sensor_client_cnt].name = name;
577 sd->hid_sensor_hub_client_devs[ 575 sd->hid_sensor_hub_client_devs[
578 sd->hid_sensor_client_cnt].platform_data = 576 sd->hid_sensor_client_cnt].platform_data =
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index da551d113762..098af2f84b8c 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -225,6 +225,13 @@ static const unsigned int buzz_keymap[] = {
225struct sony_sc { 225struct sony_sc {
226 unsigned long quirks; 226 unsigned long quirks;
227 227
228#ifdef CONFIG_SONY_FF
229 struct work_struct rumble_worker;
230 struct hid_device *hdev;
231 __u8 left;
232 __u8 right;
233#endif
234
228 void *extra; 235 void *extra;
229}; 236};
230 237
@@ -615,9 +622,9 @@ static void buzz_remove(struct hid_device *hdev)
615} 622}
616 623
617#ifdef CONFIG_SONY_FF 624#ifdef CONFIG_SONY_FF
618static int sony_play_effect(struct input_dev *dev, void *data, 625static void sony_rumble_worker(struct work_struct *work)
619 struct ff_effect *effect)
620{ 626{
627 struct sony_sc *sc = container_of(work, struct sony_sc, rumble_worker);
621 unsigned char buf[] = { 628 unsigned char buf[] = {
622 0x01, 629 0x01,
623 0x00, 0xff, 0x00, 0xff, 0x00, 630 0x00, 0xff, 0x00, 0xff, 0x00,
@@ -628,21 +635,28 @@ static int sony_play_effect(struct input_dev *dev, void *data,
628 0xff, 0x27, 0x10, 0x00, 0x32, 635 0xff, 0x27, 0x10, 0x00, 0x32,
629 0x00, 0x00, 0x00, 0x00, 0x00 636 0x00, 0x00, 0x00, 0x00, 0x00
630 }; 637 };
631 __u8 left; 638
632 __u8 right; 639 buf[3] = sc->right;
640 buf[5] = sc->left;
641
642 sc->hdev->hid_output_raw_report(sc->hdev, buf, sizeof(buf),
643 HID_OUTPUT_REPORT);
644}
645
646static int sony_play_effect(struct input_dev *dev, void *data,
647 struct ff_effect *effect)
648{
633 struct hid_device *hid = input_get_drvdata(dev); 649 struct hid_device *hid = input_get_drvdata(dev);
650 struct sony_sc *sc = hid_get_drvdata(hid);
634 651
635 if (effect->type != FF_RUMBLE) 652 if (effect->type != FF_RUMBLE)
636 return 0; 653 return 0;
637 654
638 left = effect->u.rumble.strong_magnitude / 256; 655 sc->left = effect->u.rumble.strong_magnitude / 256;
639 right = effect->u.rumble.weak_magnitude ? 1 : 0; 656 sc->right = effect->u.rumble.weak_magnitude ? 1 : 0;
640
641 buf[3] = right;
642 buf[5] = left;
643 657
644 return hid->hid_output_raw_report(hid, buf, sizeof(buf), 658 schedule_work(&sc->rumble_worker);
645 HID_OUTPUT_REPORT); 659 return 0;
646} 660}
647 661
648static int sony_init_ff(struct hid_device *hdev) 662static int sony_init_ff(struct hid_device *hdev)
@@ -650,16 +664,31 @@ static int sony_init_ff(struct hid_device *hdev)
650 struct hid_input *hidinput = list_entry(hdev->inputs.next, 664 struct hid_input *hidinput = list_entry(hdev->inputs.next,
651 struct hid_input, list); 665 struct hid_input, list);
652 struct input_dev *input_dev = hidinput->input; 666 struct input_dev *input_dev = hidinput->input;
667 struct sony_sc *sc = hid_get_drvdata(hdev);
668
669 sc->hdev = hdev;
670 INIT_WORK(&sc->rumble_worker, sony_rumble_worker);
653 671
654 input_set_capability(input_dev, EV_FF, FF_RUMBLE); 672 input_set_capability(input_dev, EV_FF, FF_RUMBLE);
655 return input_ff_create_memless(input_dev, NULL, sony_play_effect); 673 return input_ff_create_memless(input_dev, NULL, sony_play_effect);
656} 674}
657 675
676static void sony_destroy_ff(struct hid_device *hdev)
677{
678 struct sony_sc *sc = hid_get_drvdata(hdev);
679
680 cancel_work_sync(&sc->rumble_worker);
681}
682
658#else 683#else
659static int sony_init_ff(struct hid_device *hdev) 684static int sony_init_ff(struct hid_device *hdev)
660{ 685{
661 return 0; 686 return 0;
662} 687}
688
689static void sony_destroy_ff(struct hid_device *hdev)
690{
691}
663#endif 692#endif
664 693
665static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) 694static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
@@ -728,6 +757,8 @@ static void sony_remove(struct hid_device *hdev)
728 if (sc->quirks & BUZZ_CONTROLLER) 757 if (sc->quirks & BUZZ_CONTROLLER)
729 buzz_remove(hdev); 758 buzz_remove(hdev);
730 759
760 sony_destroy_ff(hdev);
761
731 hid_hw_stop(hdev); 762 hid_hw_stop(hdev);
732} 763}
733 764
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index 1446f526ee8b..abb20db2b443 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -834,8 +834,7 @@ static void wiimote_init_set_type(struct wiimote_data *wdata,
834 goto done; 834 goto done;
835 } 835 }
836 836
837 if (vendor == USB_VENDOR_ID_NINTENDO || 837 if (vendor == USB_VENDOR_ID_NINTENDO) {
838 vendor == USB_VENDOR_ID_NINTENDO2) {
839 if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE) { 838 if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE) {
840 devtype = WIIMOTE_DEV_GEN10; 839 devtype = WIIMOTE_DEV_GEN10;
841 goto done; 840 goto done;
@@ -1856,8 +1855,6 @@ static void wiimote_hid_remove(struct hid_device *hdev)
1856static const struct hid_device_id wiimote_hid_devices[] = { 1855static const struct hid_device_id wiimote_hid_devices[] = {
1857 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, 1856 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
1858 USB_DEVICE_ID_NINTENDO_WIIMOTE) }, 1857 USB_DEVICE_ID_NINTENDO_WIIMOTE) },
1859 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2,
1860 USB_DEVICE_ID_NINTENDO_WIIMOTE) },
1861 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, 1858 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
1862 USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, 1859 USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
1863 { } 1860 { }
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index ae48d18ee315..5f7e55f4b7f0 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -1008,7 +1008,7 @@ static int i2c_hid_probe(struct i2c_client *client,
1008 hid->hid_get_raw_report = i2c_hid_get_raw_report; 1008 hid->hid_get_raw_report = i2c_hid_get_raw_report;
1009 hid->hid_output_raw_report = i2c_hid_output_raw_report; 1009 hid->hid_output_raw_report = i2c_hid_output_raw_report;
1010 hid->dev.parent = &client->dev; 1010 hid->dev.parent = &client->dev;
1011 ACPI_HANDLE_SET(&hid->dev, ACPI_HANDLE(&client->dev)); 1011 ACPI_COMPANION_SET(&hid->dev, ACPI_COMPANION(&client->dev));
1012 hid->bus = BUS_I2C; 1012 hid->bus = BUS_I2C;
1013 hid->version = le16_to_cpu(ihid->hdesc.bcdVersion); 1013 hid->version = le16_to_cpu(ihid->hdesc.bcdVersion);
1014 hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID); 1014 hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 93b00d76374c..cedc6da93c19 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -287,7 +287,7 @@ static int uhid_event_from_user(const char __user *buffer, size_t len,
287 */ 287 */
288 struct uhid_create_req_compat *compat; 288 struct uhid_create_req_compat *compat;
289 289
290 compat = kmalloc(sizeof(*compat), GFP_KERNEL); 290 compat = kzalloc(sizeof(*compat), GFP_KERNEL);
291 if (!compat) 291 if (!compat)
292 return -ENOMEM; 292 return -ENOMEM;
293 293
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index b3ab9d43bb3e..52d548f1dc1d 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -656,6 +656,7 @@ config SENSORS_LM75
656 656
657 - Analog Devices ADT75 657 - Analog Devices ADT75
658 - Dallas Semiconductor DS75, DS1775 and DS7505 658 - Dallas Semiconductor DS75, DS1775 and DS7505
659 - Global Mixed-mode Technology (GMT) G751
659 - Maxim MAX6625 and MAX6626 660 - Maxim MAX6625 and MAX6626
660 - Microchip MCP980x 661 - Microchip MCP980x
661 - National Semiconductor LM75, LM75A 662 - National Semiconductor LM75, LM75A
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 8d40da314a8e..6a34f7f48eb9 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -602,9 +602,8 @@ static int read_domain_devices(struct acpi_power_meter_resource *resource)
602 602
603 /* Create a symlink to domain objects */ 603 /* Create a symlink to domain objects */
604 resource->domain_devices[i] = NULL; 604 resource->domain_devices[i] = NULL;
605 status = acpi_bus_get_device(element->reference.handle, 605 if (acpi_bus_get_device(element->reference.handle,
606 &resource->domain_devices[i]); 606 &resource->domain_devices[i]))
607 if (ACPI_FAILURE(status))
608 continue; 607 continue;
609 608
610 obj = resource->domain_devices[i]; 609 obj = resource->domain_devices[i];
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 1d7ff46812c3..dafc63c6932d 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -18,7 +18,6 @@
18#include <linux/err.h> 18#include <linux/err.h>
19 19
20#include <acpi/acpi.h> 20#include <acpi/acpi.h>
21#include <acpi/acpixf.h>
22#include <acpi/acpi_drivers.h> 21#include <acpi/acpi_drivers.h>
23#include <acpi/acpi_bus.h> 22#include <acpi/acpi_bus.h>
24 23
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
index 2dc37c7c6947..7d68a08baaa8 100644
--- a/drivers/hwmon/hih6130.c
+++ b/drivers/hwmon/hih6130.c
@@ -43,6 +43,7 @@
43 * @last_update: time of last update (jiffies) 43 * @last_update: time of last update (jiffies)
44 * @temperature: cached temperature measurement value 44 * @temperature: cached temperature measurement value
45 * @humidity: cached humidity measurement value 45 * @humidity: cached humidity measurement value
46 * @write_length: length for I2C measurement request
46 */ 47 */
47struct hih6130 { 48struct hih6130 {
48 struct device *hwmon_dev; 49 struct device *hwmon_dev;
@@ -51,6 +52,7 @@ struct hih6130 {
51 unsigned long last_update; 52 unsigned long last_update;
52 int temperature; 53 int temperature;
53 int humidity; 54 int humidity;
55 size_t write_length;
54}; 56};
55 57
56/** 58/**
@@ -121,8 +123,15 @@ static int hih6130_update_measurements(struct i2c_client *client)
121 */ 123 */
122 if (time_after(jiffies, hih6130->last_update + HZ) || !hih6130->valid) { 124 if (time_after(jiffies, hih6130->last_update + HZ) || !hih6130->valid) {
123 125
124 /* write to slave address, no data, to request a measurement */ 126 /*
125 ret = i2c_master_send(client, tmp, 0); 127 * Write to slave address to request a measurement.
128 * According with the datasheet it should be with no data, but
129 * for systems with I2C bus drivers that do not allow zero
130 * length packets we write one dummy byte to allow sensor
131 * measurements on them.
132 */
133 tmp[0] = 0;
134 ret = i2c_master_send(client, tmp, hih6130->write_length);
126 if (ret < 0) 135 if (ret < 0)
127 goto out; 136 goto out;
128 137
@@ -252,6 +261,9 @@ static int hih6130_probe(struct i2c_client *client,
252 goto fail_remove_sysfs; 261 goto fail_remove_sysfs;
253 } 262 }
254 263
264 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_QUICK))
265 hih6130->write_length = 1;
266
255 return 0; 267 return 0;
256 268
257fail_remove_sysfs: 269fail_remove_sysfs:
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index c03b490bba81..7e3ef134f1d2 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -39,6 +39,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
39 ds1775, 39 ds1775,
40 ds75, 40 ds75,
41 ds7505, 41 ds7505,
42 g751,
42 lm75, 43 lm75,
43 lm75a, 44 lm75a,
44 max6625, 45 max6625,
@@ -208,6 +209,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
208 data->resolution = 12; 209 data->resolution = 12;
209 data->sample_time = HZ / 4; 210 data->sample_time = HZ / 4;
210 break; 211 break;
212 case g751:
211 case lm75: 213 case lm75:
212 case lm75a: 214 case lm75a:
213 data->resolution = 9; 215 data->resolution = 9;
@@ -296,6 +298,7 @@ static const struct i2c_device_id lm75_ids[] = {
296 { "ds1775", ds1775, }, 298 { "ds1775", ds1775, },
297 { "ds75", ds75, }, 299 { "ds75", ds75, },
298 { "ds7505", ds7505, }, 300 { "ds7505", ds7505, },
301 { "g751", g751, },
299 { "lm75", lm75, }, 302 { "lm75", lm75, },
300 { "lm75a", lm75a, }, 303 { "lm75a", lm75a, },
301 { "max6625", max6625, }, 304 { "max6625", max6625, },
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 6cf6bff79003..a2f3b4a365e4 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -94,6 +94,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
94{ 94{
95 if (rpm <= 0) 95 if (rpm <= 0)
96 return 255; 96 return 255;
97 if (rpm > 1350000)
98 return 1;
97 return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254); 99 return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
98} 100}
99 101
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 4c4c1421bf28..8b8f3aa49726 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1610,12 +1610,14 @@ static int lm90_probe(struct i2c_client *client,
1610 "lm90", client); 1610 "lm90", client);
1611 if (err < 0) { 1611 if (err < 0) {
1612 dev_err(dev, "cannot request IRQ %d\n", client->irq); 1612 dev_err(dev, "cannot request IRQ %d\n", client->irq);
1613 goto exit_remove_files; 1613 goto exit_unregister;
1614 } 1614 }
1615 } 1615 }
1616 1616
1617 return 0; 1617 return 0;
1618 1618
1619exit_unregister:
1620 hwmon_device_unregister(data->hwmon_dev);
1619exit_remove_files: 1621exit_remove_files:
1620 lm90_remove_files(client, data); 1622 lm90_remove_files(client, data);
1621exit_restore: 1623exit_restore:
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index d17325db0ea3..cf811c1a1475 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -274,6 +274,8 @@ static const u16 NCT6775_FAN_PULSE_SHIFT[] = { 0, 0, 0, 0, 0, 0 };
274static const u16 NCT6775_REG_TEMP[] = { 274static const u16 NCT6775_REG_TEMP[] = {
275 0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d }; 275 0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d };
276 276
277static const u16 NCT6775_REG_TEMP_MON[] = { 0x73, 0x75, 0x77 };
278
277static const u16 NCT6775_REG_TEMP_CONFIG[ARRAY_SIZE(NCT6775_REG_TEMP)] = { 279static const u16 NCT6775_REG_TEMP_CONFIG[ARRAY_SIZE(NCT6775_REG_TEMP)] = {
278 0, 0x152, 0x252, 0x628, 0x629, 0x62A }; 280 0, 0x152, 0x252, 0x628, 0x629, 0x62A };
279static const u16 NCT6775_REG_TEMP_HYST[ARRAY_SIZE(NCT6775_REG_TEMP)] = { 281static const u16 NCT6775_REG_TEMP_HYST[ARRAY_SIZE(NCT6775_REG_TEMP)] = {
@@ -454,6 +456,7 @@ static const u16 NCT6779_REG_CRITICAL_PWM[] = {
454 0x137, 0x237, 0x337, 0x837, 0x937, 0xa37 }; 456 0x137, 0x237, 0x337, 0x837, 0x937, 0xa37 };
455 457
456static const u16 NCT6779_REG_TEMP[] = { 0x27, 0x150 }; 458static const u16 NCT6779_REG_TEMP[] = { 0x27, 0x150 };
459static const u16 NCT6779_REG_TEMP_MON[] = { 0x73, 0x75, 0x77, 0x79, 0x7b };
457static const u16 NCT6779_REG_TEMP_CONFIG[ARRAY_SIZE(NCT6779_REG_TEMP)] = { 460static const u16 NCT6779_REG_TEMP_CONFIG[ARRAY_SIZE(NCT6779_REG_TEMP)] = {
458 0x18, 0x152 }; 461 0x18, 0x152 };
459static const u16 NCT6779_REG_TEMP_HYST[ARRAY_SIZE(NCT6779_REG_TEMP)] = { 462static const u16 NCT6779_REG_TEMP_HYST[ARRAY_SIZE(NCT6779_REG_TEMP)] = {
@@ -507,6 +510,13 @@ static const u16 NCT6779_REG_TEMP_CRIT[ARRAY_SIZE(nct6779_temp_label) - 1]
507 510
508#define NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE 0x28 511#define NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE 0x28
509 512
513static const u16 NCT6791_REG_WEIGHT_TEMP_SEL[6] = { 0, 0x239 };
514static const u16 NCT6791_REG_WEIGHT_TEMP_STEP[6] = { 0, 0x23a };
515static const u16 NCT6791_REG_WEIGHT_TEMP_STEP_TOL[6] = { 0, 0x23b };
516static const u16 NCT6791_REG_WEIGHT_DUTY_STEP[6] = { 0, 0x23c };
517static const u16 NCT6791_REG_WEIGHT_TEMP_BASE[6] = { 0, 0x23d };
518static const u16 NCT6791_REG_WEIGHT_DUTY_BASE[6] = { 0, 0x23e };
519
510static const u16 NCT6791_REG_ALARM[NUM_REG_ALARM] = { 520static const u16 NCT6791_REG_ALARM[NUM_REG_ALARM] = {
511 0x459, 0x45A, 0x45B, 0x568, 0x45D }; 521 0x459, 0x45A, 0x45B, 0x568, 0x45D };
512 522
@@ -534,6 +544,7 @@ static const u16 NCT6106_REG_IN[] = {
534 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x07, 0x08, 0x09 }; 544 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x07, 0x08, 0x09 };
535 545
536static const u16 NCT6106_REG_TEMP[] = { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15 }; 546static const u16 NCT6106_REG_TEMP[] = { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15 };
547static const u16 NCT6106_REG_TEMP_MON[] = { 0x18, 0x19, 0x1a };
537static const u16 NCT6106_REG_TEMP_HYST[] = { 548static const u16 NCT6106_REG_TEMP_HYST[] = {
538 0xc3, 0xc7, 0xcb, 0xcf, 0xd3, 0xd7 }; 549 0xc3, 0xc7, 0xcb, 0xcf, 0xd3, 0xd7 };
539static const u16 NCT6106_REG_TEMP_OVER[] = { 550static const u16 NCT6106_REG_TEMP_OVER[] = {
@@ -1307,6 +1318,9 @@ static void nct6775_update_pwm(struct device *dev)
1307 if (reg & 0x80) 1318 if (reg & 0x80)
1308 data->pwm[2][i] = 0; 1319 data->pwm[2][i] = 0;
1309 1320
1321 if (!data->REG_WEIGHT_TEMP_SEL[i])
1322 continue;
1323
1310 reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]); 1324 reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]);
1311 data->pwm_weight_temp_sel[i] = reg & 0x1f; 1325 data->pwm_weight_temp_sel[i] = reg & 0x1f;
1312 /* If weight is disabled, report weight source as 0 */ 1326 /* If weight is disabled, report weight source as 0 */
@@ -2852,6 +2866,9 @@ static umode_t nct6775_pwm_is_visible(struct kobject *kobj,
2852 if (!(data->has_pwm & (1 << pwm))) 2866 if (!(data->has_pwm & (1 << pwm)))
2853 return 0; 2867 return 0;
2854 2868
2869 if ((nr >= 14 && nr <= 18) || nr == 21) /* weight */
2870 if (!data->REG_WEIGHT_TEMP_SEL[pwm])
2871 return 0;
2855 if (nr == 19 && data->REG_PWM[3] == NULL) /* pwm_max */ 2872 if (nr == 19 && data->REG_PWM[3] == NULL) /* pwm_max */
2856 return 0; 2873 return 0;
2857 if (nr == 20 && data->REG_PWM[4] == NULL) /* pwm_step */ 2874 if (nr == 20 && data->REG_PWM[4] == NULL) /* pwm_step */
@@ -2945,11 +2962,11 @@ static struct sensor_device_template *nct6775_attributes_pwm_template[] = {
2945 &sensor_dev_template_pwm_step_down_time, 2962 &sensor_dev_template_pwm_step_down_time,
2946 &sensor_dev_template_pwm_start, 2963 &sensor_dev_template_pwm_start,
2947 &sensor_dev_template_pwm_floor, 2964 &sensor_dev_template_pwm_floor,
2948 &sensor_dev_template_pwm_weight_temp_sel, 2965 &sensor_dev_template_pwm_weight_temp_sel, /* 14 */
2949 &sensor_dev_template_pwm_weight_temp_step, 2966 &sensor_dev_template_pwm_weight_temp_step,
2950 &sensor_dev_template_pwm_weight_temp_step_tol, 2967 &sensor_dev_template_pwm_weight_temp_step_tol,
2951 &sensor_dev_template_pwm_weight_temp_step_base, 2968 &sensor_dev_template_pwm_weight_temp_step_base,
2952 &sensor_dev_template_pwm_weight_duty_step, 2969 &sensor_dev_template_pwm_weight_duty_step, /* 18 */
2953 &sensor_dev_template_pwm_max, /* 19 */ 2970 &sensor_dev_template_pwm_max, /* 19 */
2954 &sensor_dev_template_pwm_step, /* 20 */ 2971 &sensor_dev_template_pwm_step, /* 20 */
2955 &sensor_dev_template_pwm_weight_duty_base, /* 21 */ 2972 &sensor_dev_template_pwm_weight_duty_base, /* 21 */
@@ -3253,9 +3270,9 @@ static int nct6775_probe(struct platform_device *pdev)
3253 int i, s, err = 0; 3270 int i, s, err = 0;
3254 int src, mask, available; 3271 int src, mask, available;
3255 const u16 *reg_temp, *reg_temp_over, *reg_temp_hyst, *reg_temp_config; 3272 const u16 *reg_temp, *reg_temp_over, *reg_temp_hyst, *reg_temp_config;
3256 const u16 *reg_temp_alternate, *reg_temp_crit; 3273 const u16 *reg_temp_mon, *reg_temp_alternate, *reg_temp_crit;
3257 const u16 *reg_temp_crit_l = NULL, *reg_temp_crit_h = NULL; 3274 const u16 *reg_temp_crit_l = NULL, *reg_temp_crit_h = NULL;
3258 int num_reg_temp; 3275 int num_reg_temp, num_reg_temp_mon;
3259 u8 cr2a; 3276 u8 cr2a;
3260 struct attribute_group *group; 3277 struct attribute_group *group;
3261 struct device *hwmon_dev; 3278 struct device *hwmon_dev;
@@ -3338,7 +3355,9 @@ static int nct6775_probe(struct platform_device *pdev)
3338 data->BEEP_BITS = NCT6106_BEEP_BITS; 3355 data->BEEP_BITS = NCT6106_BEEP_BITS;
3339 3356
3340 reg_temp = NCT6106_REG_TEMP; 3357 reg_temp = NCT6106_REG_TEMP;
3358 reg_temp_mon = NCT6106_REG_TEMP_MON;
3341 num_reg_temp = ARRAY_SIZE(NCT6106_REG_TEMP); 3359 num_reg_temp = ARRAY_SIZE(NCT6106_REG_TEMP);
3360 num_reg_temp_mon = ARRAY_SIZE(NCT6106_REG_TEMP_MON);
3342 reg_temp_over = NCT6106_REG_TEMP_OVER; 3361 reg_temp_over = NCT6106_REG_TEMP_OVER;
3343 reg_temp_hyst = NCT6106_REG_TEMP_HYST; 3362 reg_temp_hyst = NCT6106_REG_TEMP_HYST;
3344 reg_temp_config = NCT6106_REG_TEMP_CONFIG; 3363 reg_temp_config = NCT6106_REG_TEMP_CONFIG;
@@ -3410,7 +3429,9 @@ static int nct6775_probe(struct platform_device *pdev)
3410 data->REG_BEEP = NCT6775_REG_BEEP; 3429 data->REG_BEEP = NCT6775_REG_BEEP;
3411 3430
3412 reg_temp = NCT6775_REG_TEMP; 3431 reg_temp = NCT6775_REG_TEMP;
3432 reg_temp_mon = NCT6775_REG_TEMP_MON;
3413 num_reg_temp = ARRAY_SIZE(NCT6775_REG_TEMP); 3433 num_reg_temp = ARRAY_SIZE(NCT6775_REG_TEMP);
3434 num_reg_temp_mon = ARRAY_SIZE(NCT6775_REG_TEMP_MON);
3414 reg_temp_over = NCT6775_REG_TEMP_OVER; 3435 reg_temp_over = NCT6775_REG_TEMP_OVER;
3415 reg_temp_hyst = NCT6775_REG_TEMP_HYST; 3436 reg_temp_hyst = NCT6775_REG_TEMP_HYST;
3416 reg_temp_config = NCT6775_REG_TEMP_CONFIG; 3437 reg_temp_config = NCT6775_REG_TEMP_CONFIG;
@@ -3480,7 +3501,9 @@ static int nct6775_probe(struct platform_device *pdev)
3480 data->REG_BEEP = NCT6776_REG_BEEP; 3501 data->REG_BEEP = NCT6776_REG_BEEP;
3481 3502
3482 reg_temp = NCT6775_REG_TEMP; 3503 reg_temp = NCT6775_REG_TEMP;
3504 reg_temp_mon = NCT6775_REG_TEMP_MON;
3483 num_reg_temp = ARRAY_SIZE(NCT6775_REG_TEMP); 3505 num_reg_temp = ARRAY_SIZE(NCT6775_REG_TEMP);
3506 num_reg_temp_mon = ARRAY_SIZE(NCT6775_REG_TEMP_MON);
3484 reg_temp_over = NCT6775_REG_TEMP_OVER; 3507 reg_temp_over = NCT6775_REG_TEMP_OVER;
3485 reg_temp_hyst = NCT6775_REG_TEMP_HYST; 3508 reg_temp_hyst = NCT6775_REG_TEMP_HYST;
3486 reg_temp_config = NCT6776_REG_TEMP_CONFIG; 3509 reg_temp_config = NCT6776_REG_TEMP_CONFIG;
@@ -3554,7 +3577,9 @@ static int nct6775_probe(struct platform_device *pdev)
3554 data->REG_BEEP = NCT6776_REG_BEEP; 3577 data->REG_BEEP = NCT6776_REG_BEEP;
3555 3578
3556 reg_temp = NCT6779_REG_TEMP; 3579 reg_temp = NCT6779_REG_TEMP;
3580 reg_temp_mon = NCT6779_REG_TEMP_MON;
3557 num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP); 3581 num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP);
3582 num_reg_temp_mon = ARRAY_SIZE(NCT6779_REG_TEMP_MON);
3558 reg_temp_over = NCT6779_REG_TEMP_OVER; 3583 reg_temp_over = NCT6779_REG_TEMP_OVER;
3559 reg_temp_hyst = NCT6779_REG_TEMP_HYST; 3584 reg_temp_hyst = NCT6779_REG_TEMP_HYST;
3560 reg_temp_config = NCT6779_REG_TEMP_CONFIG; 3585 reg_temp_config = NCT6779_REG_TEMP_CONFIG;
@@ -3603,8 +3628,8 @@ static int nct6775_probe(struct platform_device *pdev)
3603 data->REG_PWM[0] = NCT6775_REG_PWM; 3628 data->REG_PWM[0] = NCT6775_REG_PWM;
3604 data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT; 3629 data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
3605 data->REG_PWM[2] = NCT6775_REG_FAN_STOP_OUTPUT; 3630 data->REG_PWM[2] = NCT6775_REG_FAN_STOP_OUTPUT;
3606 data->REG_PWM[5] = NCT6775_REG_WEIGHT_DUTY_STEP; 3631 data->REG_PWM[5] = NCT6791_REG_WEIGHT_DUTY_STEP;
3607 data->REG_PWM[6] = NCT6776_REG_WEIGHT_DUTY_BASE; 3632 data->REG_PWM[6] = NCT6791_REG_WEIGHT_DUTY_BASE;
3608 data->REG_PWM_READ = NCT6775_REG_PWM_READ; 3633 data->REG_PWM_READ = NCT6775_REG_PWM_READ;
3609 data->REG_PWM_MODE = NCT6776_REG_PWM_MODE; 3634 data->REG_PWM_MODE = NCT6776_REG_PWM_MODE;
3610 data->PWM_MODE_MASK = NCT6776_PWM_MODE_MASK; 3635 data->PWM_MODE_MASK = NCT6776_PWM_MODE_MASK;
@@ -3620,15 +3645,17 @@ static int nct6775_probe(struct platform_device *pdev)
3620 data->REG_TEMP_OFFSET = NCT6779_REG_TEMP_OFFSET; 3645 data->REG_TEMP_OFFSET = NCT6779_REG_TEMP_OFFSET;
3621 data->REG_TEMP_SOURCE = NCT6775_REG_TEMP_SOURCE; 3646 data->REG_TEMP_SOURCE = NCT6775_REG_TEMP_SOURCE;
3622 data->REG_TEMP_SEL = NCT6775_REG_TEMP_SEL; 3647 data->REG_TEMP_SEL = NCT6775_REG_TEMP_SEL;
3623 data->REG_WEIGHT_TEMP_SEL = NCT6775_REG_WEIGHT_TEMP_SEL; 3648 data->REG_WEIGHT_TEMP_SEL = NCT6791_REG_WEIGHT_TEMP_SEL;
3624 data->REG_WEIGHT_TEMP[0] = NCT6775_REG_WEIGHT_TEMP_STEP; 3649 data->REG_WEIGHT_TEMP[0] = NCT6791_REG_WEIGHT_TEMP_STEP;
3625 data->REG_WEIGHT_TEMP[1] = NCT6775_REG_WEIGHT_TEMP_STEP_TOL; 3650 data->REG_WEIGHT_TEMP[1] = NCT6791_REG_WEIGHT_TEMP_STEP_TOL;
3626 data->REG_WEIGHT_TEMP[2] = NCT6775_REG_WEIGHT_TEMP_BASE; 3651 data->REG_WEIGHT_TEMP[2] = NCT6791_REG_WEIGHT_TEMP_BASE;
3627 data->REG_ALARM = NCT6791_REG_ALARM; 3652 data->REG_ALARM = NCT6791_REG_ALARM;
3628 data->REG_BEEP = NCT6776_REG_BEEP; 3653 data->REG_BEEP = NCT6776_REG_BEEP;
3629 3654
3630 reg_temp = NCT6779_REG_TEMP; 3655 reg_temp = NCT6779_REG_TEMP;
3656 reg_temp_mon = NCT6779_REG_TEMP_MON;
3631 num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP); 3657 num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP);
3658 num_reg_temp_mon = ARRAY_SIZE(NCT6779_REG_TEMP_MON);
3632 reg_temp_over = NCT6779_REG_TEMP_OVER; 3659 reg_temp_over = NCT6779_REG_TEMP_OVER;
3633 reg_temp_hyst = NCT6779_REG_TEMP_HYST; 3660 reg_temp_hyst = NCT6779_REG_TEMP_HYST;
3634 reg_temp_config = NCT6779_REG_TEMP_CONFIG; 3661 reg_temp_config = NCT6779_REG_TEMP_CONFIG;
@@ -3729,6 +3756,50 @@ static int nct6775_probe(struct platform_device *pdev)
3729 s++; 3756 s++;
3730 } 3757 }
3731 3758
3759 /*
3760 * Repeat with temperatures used for fan control.
3761 * This set of registers does not support limits.
3762 */
3763 for (i = 0; i < num_reg_temp_mon; i++) {
3764 if (reg_temp_mon[i] == 0)
3765 continue;
3766
3767 src = nct6775_read_value(data, data->REG_TEMP_SEL[i]) & 0x1f;
3768 if (!src || (mask & (1 << src)))
3769 continue;
3770
3771 if (src >= data->temp_label_num ||
3772 !strlen(data->temp_label[src])) {
3773 dev_info(dev,
3774 "Invalid temperature source %d at index %d, source register 0x%x, temp register 0x%x\n",
3775 src, i, data->REG_TEMP_SEL[i],
3776 reg_temp_mon[i]);
3777 continue;
3778 }
3779
3780 mask |= 1 << src;
3781
3782 /* Use fixed index for SYSTIN(1), CPUTIN(2), AUXTIN(3) */
3783 if (src <= data->temp_fixed_num) {
3784 if (data->have_temp & (1 << (src - 1)))
3785 continue;
3786 data->have_temp |= 1 << (src - 1);
3787 data->have_temp_fixed |= 1 << (src - 1);
3788 data->reg_temp[0][src - 1] = reg_temp_mon[i];
3789 data->temp_src[src - 1] = src;
3790 continue;
3791 }
3792
3793 if (s >= NUM_TEMP)
3794 continue;
3795
3796 /* Use dynamic index for other sources */
3797 data->have_temp |= 1 << s;
3798 data->reg_temp[0][s] = reg_temp_mon[i];
3799 data->temp_src[s] = src;
3800 s++;
3801 }
3802
3732#ifdef USE_ALTERNATE 3803#ifdef USE_ALTERNATE
3733 /* 3804 /*
3734 * Go through the list of alternate temp registers and enable 3805 * Go through the list of alternate temp registers and enable
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 1404e6319deb..72a889702f0d 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -141,6 +141,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
141{ 141{
142 if (rpm <= 0) 142 if (rpm <= 0)
143 return 255; 143 return 255;
144 if (rpm > 1350000)
145 return 1;
144 return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254); 146 return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
145} 147}
146 148
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index 0e7017841f7d..aee14e2192f8 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -145,7 +145,7 @@ static const u8 regtempmin[] = { 0x3a, 0x3e, 0x2c, 0x2e, 0x30, 0x32 };
145 */ 145 */
146static inline u8 FAN_TO_REG(long rpm, int div) 146static inline u8 FAN_TO_REG(long rpm, int div)
147{ 147{
148 if (rpm == 0) 148 if (rpm <= 0 || rpm > 1310720)
149 return 0; 149 return 0;
150 return clamp_val(1310720 / (rpm * div), 1, 255); 150 return clamp_val(1310720 / (rpm * div), 1, 255);
151} 151}
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index edb06cda5a68..6ed76ceb9270 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -481,9 +481,11 @@ store_pwm(struct device *dev, struct device_attribute *attr,
481 if (err) 481 if (err)
482 return err; 482 return err;
483 val = clamp_val(val, 0, 255); 483 val = clamp_val(val, 0, 255);
484 val = DIV_ROUND_CLOSEST(val, 0x11);
484 485
485 mutex_lock(&data->update_lock); 486 mutex_lock(&data->update_lock);
486 data->pwm[nr] = val; 487 data->pwm[nr] = val * 0x11;
488 val |= w83l786ng_read_value(client, W83L786NG_REG_PWM[nr]) & 0xf0;
487 w83l786ng_write_value(client, W83L786NG_REG_PWM[nr], val); 489 w83l786ng_write_value(client, W83L786NG_REG_PWM[nr], val);
488 mutex_unlock(&data->update_lock); 490 mutex_unlock(&data->update_lock);
489 return count; 491 return count;
@@ -510,7 +512,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
510 mutex_lock(&data->update_lock); 512 mutex_lock(&data->update_lock);
511 reg = w83l786ng_read_value(client, W83L786NG_REG_FAN_CFG); 513 reg = w83l786ng_read_value(client, W83L786NG_REG_FAN_CFG);
512 data->pwm_enable[nr] = val; 514 data->pwm_enable[nr] = val;
513 reg &= ~(0x02 << W83L786NG_PWM_ENABLE_SHIFT[nr]); 515 reg &= ~(0x03 << W83L786NG_PWM_ENABLE_SHIFT[nr]);
514 reg |= (val - 1) << W83L786NG_PWM_ENABLE_SHIFT[nr]; 516 reg |= (val - 1) << W83L786NG_PWM_ENABLE_SHIFT[nr];
515 w83l786ng_write_value(client, W83L786NG_REG_FAN_CFG, reg); 517 w83l786ng_write_value(client, W83L786NG_REG_FAN_CFG, reg);
516 mutex_unlock(&data->update_lock); 518 mutex_unlock(&data->update_lock);
@@ -776,9 +778,10 @@ static struct w83l786ng_data *w83l786ng_update_device(struct device *dev)
776 ((pwmcfg >> W83L786NG_PWM_MODE_SHIFT[i]) & 1) 778 ((pwmcfg >> W83L786NG_PWM_MODE_SHIFT[i]) & 1)
777 ? 0 : 1; 779 ? 0 : 1;
778 data->pwm_enable[i] = 780 data->pwm_enable[i] =
779 ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 2) + 1; 781 ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 3) + 1;
780 data->pwm[i] = w83l786ng_read_value(client, 782 data->pwm[i] =
781 W83L786NG_REG_PWM[i]); 783 (w83l786ng_read_value(client, W83L786NG_REG_PWM[i])
784 & 0x0f) * 0x11;
782 } 785 }
783 786
784 787
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index 036cf03aeb61..18a74a6751a9 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -20,7 +20,6 @@
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/clk.h> 21#include <linux/clk.h>
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/clk.h>
24#include <linux/slab.h> 23#include <linux/slab.h>
25 24
26/* Hardware register offsets and field defintions */ 25/* Hardware register offsets and field defintions */
@@ -891,7 +890,7 @@ static const struct of_device_id bcm_kona_i2c_of_match[] = {
891 {.compatible = "brcm,kona-i2c",}, 890 {.compatible = "brcm,kona-i2c",},
892 {}, 891 {},
893}; 892};
894MODULE_DEVICE_TABLE(of, kona_i2c_of_match); 893MODULE_DEVICE_TABLE(of, bcm_kona_i2c_of_match);
895 894
896static struct platform_driver bcm_kona_i2c_driver = { 895static struct platform_driver bcm_kona_i2c_driver = {
897 .driver = { 896 .driver = {
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index d7e8600f31fb..77df97b932af 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -299,6 +299,7 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
299 strlcpy(adap->name, "bcm2835 I2C adapter", sizeof(adap->name)); 299 strlcpy(adap->name, "bcm2835 I2C adapter", sizeof(adap->name));
300 adap->algo = &bcm2835_i2c_algo; 300 adap->algo = &bcm2835_i2c_algo;
301 adap->dev.parent = &pdev->dev; 301 adap->dev.parent = &pdev->dev;
302 adap->dev.of_node = pdev->dev.of_node;
302 303
303 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0); 304 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0);
304 305
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index ff05d9fef4a8..af0b5830303d 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -125,12 +125,12 @@ static struct davinci_i2c_platform_data davinci_i2c_platform_data_default = {
125static inline void davinci_i2c_write_reg(struct davinci_i2c_dev *i2c_dev, 125static inline void davinci_i2c_write_reg(struct davinci_i2c_dev *i2c_dev,
126 int reg, u16 val) 126 int reg, u16 val)
127{ 127{
128 __raw_writew(val, i2c_dev->base + reg); 128 writew_relaxed(val, i2c_dev->base + reg);
129} 129}
130 130
131static inline u16 davinci_i2c_read_reg(struct davinci_i2c_dev *i2c_dev, int reg) 131static inline u16 davinci_i2c_read_reg(struct davinci_i2c_dev *i2c_dev, int reg)
132{ 132{
133 return __raw_readw(i2c_dev->base + reg); 133 return readw_relaxed(i2c_dev->base + reg);
134} 134}
135 135
136/* Generate a pulse on the i2c clock pin. */ 136/* Generate a pulse on the i2c clock pin. */
diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
index dae3ddfe7619..721f7ebf9a3b 100644
--- a/drivers/i2c/busses/i2c-diolan-u2c.c
+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
@@ -25,8 +25,6 @@
25#define USB_VENDOR_ID_DIOLAN 0x0abf 25#define USB_VENDOR_ID_DIOLAN 0x0abf
26#define USB_DEVICE_ID_DIOLAN_U2C 0x3370 26#define USB_DEVICE_ID_DIOLAN_U2C 0x3370
27 27
28#define DIOLAN_OUT_EP 0x02
29#define DIOLAN_IN_EP 0x84
30 28
31/* commands via USB, must match command ids in the firmware */ 29/* commands via USB, must match command ids in the firmware */
32#define CMD_I2C_READ 0x01 30#define CMD_I2C_READ 0x01
@@ -84,6 +82,7 @@
84struct i2c_diolan_u2c { 82struct i2c_diolan_u2c {
85 u8 obuffer[DIOLAN_OUTBUF_LEN]; /* output buffer */ 83 u8 obuffer[DIOLAN_OUTBUF_LEN]; /* output buffer */
86 u8 ibuffer[DIOLAN_INBUF_LEN]; /* input buffer */ 84 u8 ibuffer[DIOLAN_INBUF_LEN]; /* input buffer */
85 int ep_in, ep_out; /* Endpoints */
87 struct usb_device *usb_dev; /* the usb device for this device */ 86 struct usb_device *usb_dev; /* the usb device for this device */
88 struct usb_interface *interface;/* the interface for this device */ 87 struct usb_interface *interface;/* the interface for this device */
89 struct i2c_adapter adapter; /* i2c related things */ 88 struct i2c_adapter adapter; /* i2c related things */
@@ -109,7 +108,7 @@ static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
109 return -EINVAL; 108 return -EINVAL;
110 109
111 ret = usb_bulk_msg(dev->usb_dev, 110 ret = usb_bulk_msg(dev->usb_dev,
112 usb_sndbulkpipe(dev->usb_dev, DIOLAN_OUT_EP), 111 usb_sndbulkpipe(dev->usb_dev, dev->ep_out),
113 dev->obuffer, dev->olen, &actual, 112 dev->obuffer, dev->olen, &actual,
114 DIOLAN_USB_TIMEOUT); 113 DIOLAN_USB_TIMEOUT);
115 if (!ret) { 114 if (!ret) {
@@ -118,7 +117,7 @@ static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
118 117
119 tmpret = usb_bulk_msg(dev->usb_dev, 118 tmpret = usb_bulk_msg(dev->usb_dev,
120 usb_rcvbulkpipe(dev->usb_dev, 119 usb_rcvbulkpipe(dev->usb_dev,
121 DIOLAN_IN_EP), 120 dev->ep_in),
122 dev->ibuffer, 121 dev->ibuffer,
123 sizeof(dev->ibuffer), &actual, 122 sizeof(dev->ibuffer), &actual,
124 DIOLAN_USB_TIMEOUT); 123 DIOLAN_USB_TIMEOUT);
@@ -210,7 +209,7 @@ static void diolan_flush_input(struct i2c_diolan_u2c *dev)
210 int ret; 209 int ret;
211 210
212 ret = usb_bulk_msg(dev->usb_dev, 211 ret = usb_bulk_msg(dev->usb_dev,
213 usb_rcvbulkpipe(dev->usb_dev, DIOLAN_IN_EP), 212 usb_rcvbulkpipe(dev->usb_dev, dev->ep_in),
214 dev->ibuffer, sizeof(dev->ibuffer), &actual, 213 dev->ibuffer, sizeof(dev->ibuffer), &actual,
215 DIOLAN_USB_TIMEOUT); 214 DIOLAN_USB_TIMEOUT);
216 if (ret < 0 || actual == 0) 215 if (ret < 0 || actual == 0)
@@ -445,9 +444,14 @@ static void diolan_u2c_free(struct i2c_diolan_u2c *dev)
445static int diolan_u2c_probe(struct usb_interface *interface, 444static int diolan_u2c_probe(struct usb_interface *interface,
446 const struct usb_device_id *id) 445 const struct usb_device_id *id)
447{ 446{
447 struct usb_host_interface *hostif = interface->cur_altsetting;
448 struct i2c_diolan_u2c *dev; 448 struct i2c_diolan_u2c *dev;
449 int ret; 449 int ret;
450 450
451 if (hostif->desc.bInterfaceNumber != 0
452 || hostif->desc.bNumEndpoints < 2)
453 return -ENODEV;
454
451 /* allocate memory for our device state and initialize it */ 455 /* allocate memory for our device state and initialize it */
452 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 456 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
453 if (dev == NULL) { 457 if (dev == NULL) {
@@ -455,6 +459,8 @@ static int diolan_u2c_probe(struct usb_interface *interface,
455 ret = -ENOMEM; 459 ret = -ENOMEM;
456 goto error; 460 goto error;
457 } 461 }
462 dev->ep_out = hostif->endpoint[0].desc.bEndpointAddress;
463 dev->ep_in = hostif->endpoint[1].desc.bEndpointAddress;
458 464
459 dev->usb_dev = usb_get_dev(interface_to_usbdev(interface)); 465 dev->usb_dev = usb_get_dev(interface_to_usbdev(interface));
460 dev->interface = interface; 466 dev->interface = interface;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 1d7efa3169cd..d0cfbb4cb964 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -312,7 +312,9 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)
312 312
313 dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); 313 dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
314 314
315 clk_prepare_enable(i2c_imx->clk); 315 result = clk_prepare_enable(i2c_imx->clk);
316 if (result)
317 return result;
316 imx_i2c_write_reg(i2c_imx->ifdr, i2c_imx, IMX_I2C_IFDR); 318 imx_i2c_write_reg(i2c_imx->ifdr, i2c_imx, IMX_I2C_IFDR);
317 /* Enable I2C controller */ 319 /* Enable I2C controller */
318 imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR); 320 imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index a6a891d7970d..90dcc2eaac5f 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -266,13 +266,13 @@ static const u8 reg_map_ip_v2[] = {
266static inline void omap_i2c_write_reg(struct omap_i2c_dev *i2c_dev, 266static inline void omap_i2c_write_reg(struct omap_i2c_dev *i2c_dev,
267 int reg, u16 val) 267 int reg, u16 val)
268{ 268{
269 __raw_writew(val, i2c_dev->base + 269 writew_relaxed(val, i2c_dev->base +
270 (i2c_dev->regs[reg] << i2c_dev->reg_shift)); 270 (i2c_dev->regs[reg] << i2c_dev->reg_shift));
271} 271}
272 272
273static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg) 273static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
274{ 274{
275 return __raw_readw(i2c_dev->base + 275 return readw_relaxed(i2c_dev->base +
276 (i2c_dev->regs[reg] << i2c_dev->reg_shift)); 276 (i2c_dev->regs[reg] << i2c_dev->reg_shift));
277} 277}
278 278
@@ -1037,6 +1037,20 @@ static const struct i2c_algorithm omap_i2c_algo = {
1037}; 1037};
1038 1038
1039#ifdef CONFIG_OF 1039#ifdef CONFIG_OF
1040static struct omap_i2c_bus_platform_data omap2420_pdata = {
1041 .rev = OMAP_I2C_IP_VERSION_1,
1042 .flags = OMAP_I2C_FLAG_NO_FIFO |
1043 OMAP_I2C_FLAG_SIMPLE_CLOCK |
1044 OMAP_I2C_FLAG_16BIT_DATA_REG |
1045 OMAP_I2C_FLAG_BUS_SHIFT_2,
1046};
1047
1048static struct omap_i2c_bus_platform_data omap2430_pdata = {
1049 .rev = OMAP_I2C_IP_VERSION_1,
1050 .flags = OMAP_I2C_FLAG_BUS_SHIFT_2 |
1051 OMAP_I2C_FLAG_FORCE_19200_INT_CLK,
1052};
1053
1040static struct omap_i2c_bus_platform_data omap3_pdata = { 1054static struct omap_i2c_bus_platform_data omap3_pdata = {
1041 .rev = OMAP_I2C_IP_VERSION_1, 1055 .rev = OMAP_I2C_IP_VERSION_1,
1042 .flags = OMAP_I2C_FLAG_BUS_SHIFT_2, 1056 .flags = OMAP_I2C_FLAG_BUS_SHIFT_2,
@@ -1055,6 +1069,14 @@ static const struct of_device_id omap_i2c_of_match[] = {
1055 .compatible = "ti,omap3-i2c", 1069 .compatible = "ti,omap3-i2c",
1056 .data = &omap3_pdata, 1070 .data = &omap3_pdata,
1057 }, 1071 },
1072 {
1073 .compatible = "ti,omap2430-i2c",
1074 .data = &omap2430_pdata,
1075 },
1076 {
1077 .compatible = "ti,omap2420-i2c",
1078 .data = &omap2420_pdata,
1079 },
1058 { }, 1080 { },
1059}; 1081};
1060MODULE_DEVICE_TABLE(of, omap_i2c_of_match); 1082MODULE_DEVICE_TABLE(of, omap_i2c_of_match);
@@ -1140,9 +1162,9 @@ omap_i2c_probe(struct platform_device *pdev)
1140 * Read the Rev hi bit-[15:14] ie scheme this is 1 indicates ver2. 1162 * Read the Rev hi bit-[15:14] ie scheme this is 1 indicates ver2.
1141 * On omap1/3/2 Offset 4 is IE Reg the bit [15:14] is 0 at reset. 1163 * On omap1/3/2 Offset 4 is IE Reg the bit [15:14] is 0 at reset.
1142 * Also since the omap_i2c_read_reg uses reg_map_ip_* a 1164 * Also since the omap_i2c_read_reg uses reg_map_ip_* a
1143 * raw_readw is done. 1165 * readw_relaxed is done.
1144 */ 1166 */
1145 rev = __raw_readw(dev->base + 0x04); 1167 rev = readw_relaxed(dev->base + 0x04);
1146 1168
1147 dev->scheme = OMAP_I2C_SCHEME(rev); 1169 dev->scheme = OMAP_I2C_SCHEME(rev);
1148 switch (dev->scheme) { 1170 switch (dev->scheme) {
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 5923cfa390c8..d74c0b34248e 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -615,6 +615,22 @@ void i2c_unlock_adapter(struct i2c_adapter *adapter)
615} 615}
616EXPORT_SYMBOL_GPL(i2c_unlock_adapter); 616EXPORT_SYMBOL_GPL(i2c_unlock_adapter);
617 617
618static void i2c_dev_set_name(struct i2c_adapter *adap,
619 struct i2c_client *client)
620{
621 struct acpi_device *adev = ACPI_COMPANION(&client->dev);
622
623 if (adev) {
624 dev_set_name(&client->dev, "i2c-%s", acpi_dev_name(adev));
625 return;
626 }
627
628 /* For 10-bit clients, add an arbitrary offset to avoid collisions */
629 dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
630 client->addr | ((client->flags & I2C_CLIENT_TEN)
631 ? 0xa000 : 0));
632}
633
618/** 634/**
619 * i2c_new_device - instantiate an i2c device 635 * i2c_new_device - instantiate an i2c device
620 * @adap: the adapter managing the device 636 * @adap: the adapter managing the device
@@ -671,12 +687,9 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
671 client->dev.bus = &i2c_bus_type; 687 client->dev.bus = &i2c_bus_type;
672 client->dev.type = &i2c_client_type; 688 client->dev.type = &i2c_client_type;
673 client->dev.of_node = info->of_node; 689 client->dev.of_node = info->of_node;
674 ACPI_HANDLE_SET(&client->dev, info->acpi_node.handle); 690 ACPI_COMPANION_SET(&client->dev, info->acpi_node.companion);
675 691
676 /* For 10-bit clients, add an arbitrary offset to avoid collisions */ 692 i2c_dev_set_name(adap, client);
677 dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
678 client->addr | ((client->flags & I2C_CLIENT_TEN)
679 ? 0xa000 : 0));
680 status = device_register(&client->dev); 693 status = device_register(&client->dev);
681 if (status) 694 if (status)
682 goto out_err; 695 goto out_err;
@@ -1100,7 +1113,7 @@ static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
1100 return AE_OK; 1113 return AE_OK;
1101 1114
1102 memset(&info, 0, sizeof(info)); 1115 memset(&info, 0, sizeof(info));
1103 info.acpi_node.handle = handle; 1116 info.acpi_node.companion = adev;
1104 info.irq = -1; 1117 info.irq = -1;
1105 1118
1106 INIT_LIST_HEAD(&resource_list); 1119 INIT_LIST_HEAD(&resource_list);
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 797e3117bef7..2d0847b6be62 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -139,6 +139,8 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
139 priv->adap.algo = &priv->algo; 139 priv->adap.algo = &priv->algo;
140 priv->adap.algo_data = priv; 140 priv->adap.algo_data = priv;
141 priv->adap.dev.parent = &parent->dev; 141 priv->adap.dev.parent = &parent->dev;
142 priv->adap.retries = parent->retries;
143 priv->adap.timeout = parent->timeout;
142 144
143 /* Sanity check on class */ 145 /* Sanity check on class */
144 if (i2c_mux_parent_classes(parent) & class) 146 if (i2c_mux_parent_classes(parent) & class)
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 140c8ef50529..d9e1f7ccfe6f 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -7,6 +7,7 @@
7 * Copyright (C) 2006 Hannes Reinecke 7 * Copyright (C) 2006 Hannes Reinecke
8 */ 8 */
9 9
10#include <linux/acpi.h>
10#include <linux/ata.h> 11#include <linux/ata.h>
11#include <linux/delay.h> 12#include <linux/delay.h>
12#include <linux/device.h> 13#include <linux/device.h>
@@ -19,8 +20,6 @@
19#include <linux/dmi.h> 20#include <linux/dmi.h>
20#include <linux/module.h> 21#include <linux/module.h>
21 22
22#include <acpi/acpi_bus.h>
23
24#define REGS_PER_GTF 7 23#define REGS_PER_GTF 7
25 24
26struct GTM_buffer { 25struct GTM_buffer {
@@ -128,7 +127,7 @@ static int ide_get_dev_handle(struct device *dev, acpi_handle *handle,
128 127
129 DEBPRINT("ENTER: pci %02x:%02x.%01x\n", bus, devnum, func); 128 DEBPRINT("ENTER: pci %02x:%02x.%01x\n", bus, devnum, func);
130 129
131 dev_handle = DEVICE_ACPI_HANDLE(dev); 130 dev_handle = ACPI_HANDLE(dev);
132 if (!dev_handle) { 131 if (!dev_handle) {
133 DEBPRINT("no acpi handle for device\n"); 132 DEBPRINT("no acpi handle for device\n");
134 goto err; 133 goto err;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 3226ce98fb18..92d1206482a6 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * intel_idle.c - native hardware idle loop for modern Intel processors 2 * intel_idle.c - native hardware idle loop for modern Intel processors
3 * 3 *
4 * Copyright (c) 2010, Intel Corporation. 4 * Copyright (c) 2013, Intel Corporation.
5 * Len Brown <len.brown@intel.com> 5 * Len Brown <len.brown@intel.com>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
@@ -329,6 +329,22 @@ static struct cpuidle_state atom_cstates[] __initdata = {
329 { 329 {
330 .enter = NULL } 330 .enter = NULL }
331}; 331};
332static struct cpuidle_state avn_cstates[] __initdata = {
333 {
334 .name = "C1-AVN",
335 .desc = "MWAIT 0x00",
336 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
337 .exit_latency = 2,
338 .target_residency = 2,
339 .enter = &intel_idle },
340 {
341 .name = "C6-AVN",
342 .desc = "MWAIT 0x51",
343 .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
344 .exit_latency = 15,
345 .target_residency = 45,
346 .enter = &intel_idle },
347};
332 348
333/** 349/**
334 * intel_idle 350 * intel_idle
@@ -462,6 +478,11 @@ static const struct idle_cpu idle_cpu_hsw = {
462 .disable_promotion_to_c1e = true, 478 .disable_promotion_to_c1e = true,
463}; 479};
464 480
481static const struct idle_cpu idle_cpu_avn = {
482 .state_table = avn_cstates,
483 .disable_promotion_to_c1e = true,
484};
485
465#define ICPU(model, cpu) \ 486#define ICPU(model, cpu) \
466 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu } 487 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
467 488
@@ -483,6 +504,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
483 ICPU(0x3f, idle_cpu_hsw), 504 ICPU(0x3f, idle_cpu_hsw),
484 ICPU(0x45, idle_cpu_hsw), 505 ICPU(0x45, idle_cpu_hsw),
485 ICPU(0x46, idle_cpu_hsw), 506 ICPU(0x46, idle_cpu_hsw),
507 ICPU(0x4D, idle_cpu_avn),
486 {} 508 {}
487}; 509};
488MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); 510MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index dcda17395c4e..1cae4e920c9b 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -350,7 +350,7 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
350error_iio_unreg: 350error_iio_unreg:
351 iio_device_unregister(indio_dev); 351 iio_device_unregister(indio_dev);
352error_remove_trigger: 352error_remove_trigger:
353 hid_sensor_remove_trigger(indio_dev); 353 hid_sensor_remove_trigger(&accel_state->common_attributes);
354error_unreg_buffer_funcs: 354error_unreg_buffer_funcs:
355 iio_triggered_buffer_cleanup(indio_dev); 355 iio_triggered_buffer_cleanup(indio_dev);
356error_free_dev_mem: 356error_free_dev_mem:
@@ -363,10 +363,11 @@ static int hid_accel_3d_remove(struct platform_device *pdev)
363{ 363{
364 struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; 364 struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
365 struct iio_dev *indio_dev = platform_get_drvdata(pdev); 365 struct iio_dev *indio_dev = platform_get_drvdata(pdev);
366 struct accel_3d_state *accel_state = iio_priv(indio_dev);
366 367
367 sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ACCEL_3D); 368 sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ACCEL_3D);
368 iio_device_unregister(indio_dev); 369 iio_device_unregister(indio_dev);
369 hid_sensor_remove_trigger(indio_dev); 370 hid_sensor_remove_trigger(&accel_state->common_attributes);
370 iio_triggered_buffer_cleanup(indio_dev); 371 iio_triggered_buffer_cleanup(indio_dev);
371 kfree(indio_dev->channels); 372 kfree(indio_dev->channels);
372 373
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index d72118d1189c..98ba761cbb9c 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -112,9 +112,10 @@ static int kxsd9_read(struct iio_dev *indio_dev, u8 address)
112 mutex_lock(&st->buf_lock); 112 mutex_lock(&st->buf_lock);
113 st->tx[0] = KXSD9_READ(address); 113 st->tx[0] = KXSD9_READ(address);
114 ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers)); 114 ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
115 if (ret) 115 if (!ret)
116 return ret; 116 ret = (((u16)(st->rx[0])) << 8) | (st->rx[1] & 0xF0);
117 return (((u16)(st->rx[0])) << 8) | (st->rx[1] & 0xF0); 117 mutex_unlock(&st->buf_lock);
118 return ret;
118} 119}
119 120
120static IIO_CONST_ATTR(accel_scale_available, 121static IIO_CONST_ATTR(accel_scale_available,
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 17df74908db1..5b1aa027c034 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -1047,6 +1047,7 @@ static int at91_adc_probe(struct platform_device *pdev)
1047 } else { 1047 } else {
1048 if (!st->caps->has_tsmr) { 1048 if (!st->caps->has_tsmr) {
1049 dev_err(&pdev->dev, "We don't support non-TSMR adc\n"); 1049 dev_err(&pdev->dev, "We don't support non-TSMR adc\n");
1050 ret = -ENODEV;
1050 goto error_disable_adc_clk; 1051 goto error_disable_adc_clk;
1051 } 1052 }
1052 1053
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index 12948325431c..c8c1baaec6c1 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -88,10 +88,10 @@ static const int mcp3422_sample_rates[4] = {
88 88
89/* sample rates to sign extension table */ 89/* sample rates to sign extension table */
90static const int mcp3422_sign_extend[4] = { 90static const int mcp3422_sign_extend[4] = {
91 [MCP3422_SRATE_240] = 12, 91 [MCP3422_SRATE_240] = 11,
92 [MCP3422_SRATE_60] = 14, 92 [MCP3422_SRATE_60] = 13,
93 [MCP3422_SRATE_15] = 16, 93 [MCP3422_SRATE_15] = 15,
94 [MCP3422_SRATE_3] = 18 }; 94 [MCP3422_SRATE_3] = 17 };
95 95
96/* Client data (each client gets its own) */ 96/* Client data (each client gets its own) */
97struct mcp3422 { 97struct mcp3422 {
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 728411ec7642..d4d748214e4b 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -229,12 +229,15 @@ static int tiadc_iio_buffered_hardware_setup(struct iio_dev *indio_dev,
229 unsigned long flags, 229 unsigned long flags,
230 const struct iio_buffer_setup_ops *setup_ops) 230 const struct iio_buffer_setup_ops *setup_ops)
231{ 231{
232 struct iio_buffer *buffer;
232 int ret; 233 int ret;
233 234
234 indio_dev->buffer = iio_kfifo_allocate(indio_dev); 235 buffer = iio_kfifo_allocate(indio_dev);
235 if (!indio_dev->buffer) 236 if (!buffer)
236 return -ENOMEM; 237 return -ENOMEM;
237 238
239 iio_device_attach_buffer(indio_dev, buffer);
240
238 ret = request_threaded_irq(irq, pollfunc_th, pollfunc_bh, 241 ret = request_threaded_irq(irq, pollfunc_th, pollfunc_bh,
239 flags, indio_dev->name, indio_dev); 242 flags, indio_dev->name, indio_dev);
240 if (ret) 243 if (ret)
diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig
index 1178121b55b0..39188b72cd3b 100644
--- a/drivers/iio/common/hid-sensors/Kconfig
+++ b/drivers/iio/common/hid-sensors/Kconfig
@@ -25,13 +25,4 @@ config HID_SENSOR_IIO_TRIGGER
25 If this driver is compiled as a module, it will be named 25 If this driver is compiled as a module, it will be named
26 hid-sensor-trigger. 26 hid-sensor-trigger.
27 27
28config HID_SENSOR_ENUM_BASE_QUIRKS
29 bool "ENUM base quirks for HID Sensor IIO drivers"
30 depends on HID_SENSOR_IIO_COMMON
31 help
32 Say yes here to build support for sensor hub FW using
33 enumeration, which is using 1 as base instead of 0.
34 Since logical minimum is still set 0 instead of 1,
35 there is no easy way to differentiate.
36
37endmenu 28endmenu
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index b6e77e0fc420..7dcf83998e6f 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -33,33 +33,42 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
33{ 33{
34 struct hid_sensor_common *st = iio_trigger_get_drvdata(trig); 34 struct hid_sensor_common *st = iio_trigger_get_drvdata(trig);
35 int state_val; 35 int state_val;
36 int report_val;
36 37
37 if (state) { 38 if (state) {
38 if (sensor_hub_device_open(st->hsdev)) 39 if (sensor_hub_device_open(st->hsdev))
39 return -EIO; 40 return -EIO;
40 } else 41 state_val =
42 HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM;
43 report_val =
44 HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM;
45
46 } else {
41 sensor_hub_device_close(st->hsdev); 47 sensor_hub_device_close(st->hsdev);
48 state_val =
49 HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM;
50 report_val =
51 HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM;
52 }
42 53
43 state_val = state ? 1 : 0;
44 if (IS_ENABLED(CONFIG_HID_SENSOR_ENUM_BASE_QUIRKS))
45 ++state_val;
46 st->data_ready = state; 54 st->data_ready = state;
55 state_val += st->power_state.logical_minimum;
56 report_val += st->report_state.logical_minimum;
47 sensor_hub_set_feature(st->hsdev, st->power_state.report_id, 57 sensor_hub_set_feature(st->hsdev, st->power_state.report_id,
48 st->power_state.index, 58 st->power_state.index,
49 (s32)state_val); 59 (s32)state_val);
50 60
51 sensor_hub_set_feature(st->hsdev, st->report_state.report_id, 61 sensor_hub_set_feature(st->hsdev, st->report_state.report_id,
52 st->report_state.index, 62 st->report_state.index,
53 (s32)state_val); 63 (s32)report_val);
54 64
55 return 0; 65 return 0;
56} 66}
57 67
58void hid_sensor_remove_trigger(struct iio_dev *indio_dev) 68void hid_sensor_remove_trigger(struct hid_sensor_common *attrb)
59{ 69{
60 iio_trigger_unregister(indio_dev->trig); 70 iio_trigger_unregister(attrb->trigger);
61 iio_trigger_free(indio_dev->trig); 71 iio_trigger_free(attrb->trigger);
62 indio_dev->trig = NULL;
63} 72}
64EXPORT_SYMBOL(hid_sensor_remove_trigger); 73EXPORT_SYMBOL(hid_sensor_remove_trigger);
65 74
@@ -90,7 +99,7 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
90 dev_err(&indio_dev->dev, "Trigger Register Failed\n"); 99 dev_err(&indio_dev->dev, "Trigger Register Failed\n");
91 goto error_free_trig; 100 goto error_free_trig;
92 } 101 }
93 indio_dev->trig = trig; 102 indio_dev->trig = attrb->trigger = trig;
94 103
95 return ret; 104 return ret;
96 105
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
index 9a8731478eda..ca02f7811aa8 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
@@ -21,6 +21,6 @@
21 21
22int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name, 22int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
23 struct hid_sensor_common *attrb); 23 struct hid_sensor_common *attrb);
24void hid_sensor_remove_trigger(struct iio_dev *indio_dev); 24void hid_sensor_remove_trigger(struct hid_sensor_common *attrb);
25 25
26#endif 26#endif
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index ea01c6bcfb56..e54f0f4959d3 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -348,7 +348,7 @@ static int hid_gyro_3d_probe(struct platform_device *pdev)
348error_iio_unreg: 348error_iio_unreg:
349 iio_device_unregister(indio_dev); 349 iio_device_unregister(indio_dev);
350error_remove_trigger: 350error_remove_trigger:
351 hid_sensor_remove_trigger(indio_dev); 351 hid_sensor_remove_trigger(&gyro_state->common_attributes);
352error_unreg_buffer_funcs: 352error_unreg_buffer_funcs:
353 iio_triggered_buffer_cleanup(indio_dev); 353 iio_triggered_buffer_cleanup(indio_dev);
354error_free_dev_mem: 354error_free_dev_mem:
@@ -361,10 +361,11 @@ static int hid_gyro_3d_remove(struct platform_device *pdev)
361{ 361{
362 struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; 362 struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
363 struct iio_dev *indio_dev = platform_get_drvdata(pdev); 363 struct iio_dev *indio_dev = platform_get_drvdata(pdev);
364 struct gyro_3d_state *gyro_state = iio_priv(indio_dev);
364 365
365 sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_GYRO_3D); 366 sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_GYRO_3D);
366 iio_device_unregister(indio_dev); 367 iio_device_unregister(indio_dev);
367 hid_sensor_remove_trigger(indio_dev); 368 hid_sensor_remove_trigger(&gyro_state->common_attributes);
368 iio_triggered_buffer_cleanup(indio_dev); 369 iio_triggered_buffer_cleanup(indio_dev);
369 kfree(indio_dev->channels); 370 kfree(indio_dev->channels);
370 371
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index f98c2b509254..a022f27c6690 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -43,6 +43,7 @@ config GP2AP020A00F
43 depends on I2C 43 depends on I2C
44 select IIO_BUFFER 44 select IIO_BUFFER
45 select IIO_TRIGGERED_BUFFER 45 select IIO_TRIGGERED_BUFFER
46 select IRQ_WORK
46 help 47 help
47 Say Y here if you have a Sharp GP2AP020A00F proximity/ALS combo-chip 48 Say Y here if you have a Sharp GP2AP020A00F proximity/ALS combo-chip
48 hooked to an I2C bus. 49 hooked to an I2C bus.
@@ -81,6 +82,8 @@ config SENSORS_LM3533
81config TCS3472 82config TCS3472
82 tristate "TAOS TCS3472 color light-to-digital converter" 83 tristate "TAOS TCS3472 color light-to-digital converter"
83 depends on I2C 84 depends on I2C
85 select IIO_BUFFER
86 select IIO_TRIGGERED_BUFFER
84 help 87 help
85 If you say yes here you get support for the TAOS TCS3472 88 If you say yes here you get support for the TAOS TCS3472
86 family of color light-to-digital converters with IR filter. 89 family of color light-to-digital converters with IR filter.
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index fa6ae8cf89ea..8e8b9d722853 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -314,7 +314,7 @@ static int hid_als_probe(struct platform_device *pdev)
314error_iio_unreg: 314error_iio_unreg:
315 iio_device_unregister(indio_dev); 315 iio_device_unregister(indio_dev);
316error_remove_trigger: 316error_remove_trigger:
317 hid_sensor_remove_trigger(indio_dev); 317 hid_sensor_remove_trigger(&als_state->common_attributes);
318error_unreg_buffer_funcs: 318error_unreg_buffer_funcs:
319 iio_triggered_buffer_cleanup(indio_dev); 319 iio_triggered_buffer_cleanup(indio_dev);
320error_free_dev_mem: 320error_free_dev_mem:
@@ -327,10 +327,11 @@ static int hid_als_remove(struct platform_device *pdev)
327{ 327{
328 struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; 328 struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
329 struct iio_dev *indio_dev = platform_get_drvdata(pdev); 329 struct iio_dev *indio_dev = platform_get_drvdata(pdev);
330 struct als_state *als_state = iio_priv(indio_dev);
330 331
331 sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ALS); 332 sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ALS);
332 iio_device_unregister(indio_dev); 333 iio_device_unregister(indio_dev);
333 hid_sensor_remove_trigger(indio_dev); 334 hid_sensor_remove_trigger(&als_state->common_attributes);
334 iio_triggered_buffer_cleanup(indio_dev); 335 iio_triggered_buffer_cleanup(indio_dev);
335 kfree(indio_dev->channels); 336 kfree(indio_dev->channels);
336 337
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 0cf09637b35b..d86d226dcd67 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -19,6 +19,8 @@ config AK8975
19config MAG3110 19config MAG3110
20 tristate "Freescale MAG3110 3-Axis Magnetometer" 20 tristate "Freescale MAG3110 3-Axis Magnetometer"
21 depends on I2C 21 depends on I2C
22 select IIO_BUFFER
23 select IIO_TRIGGERED_BUFFER
22 help 24 help
23 Say yes here to build support for the Freescale MAG3110 3-Axis 25 Say yes here to build support for the Freescale MAG3110 3-Axis
24 magnetometer. 26 magnetometer.
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index 2634920562fb..b26e1028a0a0 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -351,7 +351,7 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
351error_iio_unreg: 351error_iio_unreg:
352 iio_device_unregister(indio_dev); 352 iio_device_unregister(indio_dev);
353error_remove_trigger: 353error_remove_trigger:
354 hid_sensor_remove_trigger(indio_dev); 354 hid_sensor_remove_trigger(&magn_state->common_attributes);
355error_unreg_buffer_funcs: 355error_unreg_buffer_funcs:
356 iio_triggered_buffer_cleanup(indio_dev); 356 iio_triggered_buffer_cleanup(indio_dev);
357error_free_dev_mem: 357error_free_dev_mem:
@@ -364,10 +364,11 @@ static int hid_magn_3d_remove(struct platform_device *pdev)
364{ 364{
365 struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; 365 struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
366 struct iio_dev *indio_dev = platform_get_drvdata(pdev); 366 struct iio_dev *indio_dev = platform_get_drvdata(pdev);
367 struct magn_3d_state *magn_state = iio_priv(indio_dev);
367 368
368 sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_COMPASS_3D); 369 sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_COMPASS_3D);
369 iio_device_unregister(indio_dev); 370 iio_device_unregister(indio_dev);
370 hid_sensor_remove_trigger(indio_dev); 371 hid_sensor_remove_trigger(&magn_state->common_attributes);
371 iio_triggered_buffer_cleanup(indio_dev); 372 iio_triggered_buffer_cleanup(indio_dev);
372 kfree(indio_dev->channels); 373 kfree(indio_dev->channels);
373 374
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index 783c5b417356..becf54496967 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -250,7 +250,12 @@ done:
250 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \ 250 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
251 BIT(IIO_CHAN_INFO_SCALE), \ 251 BIT(IIO_CHAN_INFO_SCALE), \
252 .scan_index = idx, \ 252 .scan_index = idx, \
253 .scan_type = IIO_ST('s', 16, 16, IIO_BE), \ 253 .scan_type = { \
254 .sign = 's', \
255 .realbits = 16, \
256 .storagebits = 16, \
257 .endianness = IIO_BE, \
258 }, \
254} 259}
255 260
256static const struct iio_chan_spec mag3110_channels[] = { 261static const struct iio_chan_spec mag3110_channels[] = {
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 6df23502059a..6be57c38638d 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -22,6 +22,7 @@
22#include <linux/socket.h> 22#include <linux/socket.h>
23#include <linux/in.h> 23#include <linux/in.h>
24#include <linux/in6.h> 24#include <linux/in6.h>
25#include <linux/llist.h>
25#include <rdma/ib_verbs.h> 26#include <rdma/ib_verbs.h>
26#include <rdma/rdma_cm.h> 27#include <rdma/rdma_cm.h>
27#include <target/target_core_base.h> 28#include <target/target_core_base.h>
@@ -489,6 +490,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
489 kref_init(&isert_conn->conn_kref); 490 kref_init(&isert_conn->conn_kref);
490 kref_get(&isert_conn->conn_kref); 491 kref_get(&isert_conn->conn_kref);
491 mutex_init(&isert_conn->conn_mutex); 492 mutex_init(&isert_conn->conn_mutex);
493 mutex_init(&isert_conn->conn_comp_mutex);
492 spin_lock_init(&isert_conn->conn_lock); 494 spin_lock_init(&isert_conn->conn_lock);
493 495
494 cma_id->context = isert_conn; 496 cma_id->context = isert_conn;
@@ -843,14 +845,32 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
843} 845}
844 846
845static void 847static void
846isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr) 848isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
849 struct ib_send_wr *send_wr, bool coalesce)
847{ 850{
851 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
852
848 isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND; 853 isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
849 send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; 854 send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
850 send_wr->opcode = IB_WR_SEND; 855 send_wr->opcode = IB_WR_SEND;
851 send_wr->send_flags = IB_SEND_SIGNALED; 856 send_wr->sg_list = &tx_desc->tx_sg[0];
852 send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0];
853 send_wr->num_sge = isert_cmd->tx_desc.num_sge; 857 send_wr->num_sge = isert_cmd->tx_desc.num_sge;
858 /*
859 * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
860 * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
861 */
862 mutex_lock(&isert_conn->conn_comp_mutex);
863 if (coalesce &&
864 ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
865 llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
866 mutex_unlock(&isert_conn->conn_comp_mutex);
867 return;
868 }
869 isert_conn->conn_comp_batch = 0;
870 tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
871 mutex_unlock(&isert_conn->conn_comp_mutex);
872
873 send_wr->send_flags = IB_SEND_SIGNALED;
854} 874}
855 875
856static int 876static int
@@ -1582,8 +1602,8 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
1582} 1602}
1583 1603
1584static void 1604static void
1585isert_send_completion(struct iser_tx_desc *tx_desc, 1605__isert_send_completion(struct iser_tx_desc *tx_desc,
1586 struct isert_conn *isert_conn) 1606 struct isert_conn *isert_conn)
1587{ 1607{
1588 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1608 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1589 struct isert_cmd *isert_cmd = tx_desc->isert_cmd; 1609 struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
@@ -1624,6 +1644,24 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
1624} 1644}
1625 1645
1626static void 1646static void
1647isert_send_completion(struct iser_tx_desc *tx_desc,
1648 struct isert_conn *isert_conn)
1649{
1650 struct llist_node *llnode = tx_desc->comp_llnode_batch;
1651 struct iser_tx_desc *t;
1652 /*
1653 * Drain coalesced completion llist starting from comp_llnode_batch
1654 * setup in isert_init_send_wr(), and then complete trailing tx_desc.
1655 */
1656 while (llnode) {
1657 t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
1658 llnode = llist_next(llnode);
1659 __isert_send_completion(t, isert_conn);
1660 }
1661 __isert_send_completion(tx_desc, isert_conn);
1662}
1663
1664static void
1627isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) 1665isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
1628{ 1666{
1629 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1667 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
@@ -1793,7 +1831,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1793 isert_cmd->tx_desc.num_sge = 2; 1831 isert_cmd->tx_desc.num_sge = 2;
1794 } 1832 }
1795 1833
1796 isert_init_send_wr(isert_cmd, send_wr); 1834 isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);
1797 1835
1798 pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 1836 pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1799 1837
@@ -1813,7 +1851,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
1813 &isert_cmd->tx_desc.iscsi_header, 1851 &isert_cmd->tx_desc.iscsi_header,
1814 nopout_response); 1852 nopout_response);
1815 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1853 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1816 isert_init_send_wr(isert_cmd, send_wr); 1854 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1817 1855
1818 pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 1856 pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1819 1857
@@ -1831,7 +1869,7 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1831 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *) 1869 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
1832 &isert_cmd->tx_desc.iscsi_header); 1870 &isert_cmd->tx_desc.iscsi_header);
1833 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1871 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1834 isert_init_send_wr(isert_cmd, send_wr); 1872 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1835 1873
1836 pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 1874 pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1837 1875
@@ -1849,7 +1887,7 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1849 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *) 1887 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
1850 &isert_cmd->tx_desc.iscsi_header); 1888 &isert_cmd->tx_desc.iscsi_header);
1851 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1889 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1852 isert_init_send_wr(isert_cmd, send_wr); 1890 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1853 1891
1854 pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 1892 pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1855 1893
@@ -1881,7 +1919,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1881 tx_dsg->lkey = isert_conn->conn_mr->lkey; 1919 tx_dsg->lkey = isert_conn->conn_mr->lkey;
1882 isert_cmd->tx_desc.num_sge = 2; 1920 isert_cmd->tx_desc.num_sge = 2;
1883 1921
1884 isert_init_send_wr(isert_cmd, send_wr); 1922 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1885 1923
1886 pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 1924 pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1887 1925
@@ -1921,7 +1959,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1921 tx_dsg->lkey = isert_conn->conn_mr->lkey; 1959 tx_dsg->lkey = isert_conn->conn_mr->lkey;
1922 isert_cmd->tx_desc.num_sge = 2; 1960 isert_cmd->tx_desc.num_sge = 2;
1923 } 1961 }
1924 isert_init_send_wr(isert_cmd, send_wr); 1962 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1925 1963
1926 pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 1964 pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1927 1965
@@ -1991,8 +2029,6 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1991 2029
1992 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { 2030 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
1993 data_left = se_cmd->data_length; 2031 data_left = se_cmd->data_length;
1994 iscsit_increment_maxcmdsn(cmd, conn->sess);
1995 cmd->stat_sn = conn->stat_sn++;
1996 } else { 2032 } else {
1997 sg_off = cmd->write_data_done / PAGE_SIZE; 2033 sg_off = cmd->write_data_done / PAGE_SIZE;
1998 data_left = se_cmd->data_length - cmd->write_data_done; 2034 data_left = se_cmd->data_length - cmd->write_data_done;
@@ -2204,8 +2240,6 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2204 2240
2205 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { 2241 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2206 data_left = se_cmd->data_length; 2242 data_left = se_cmd->data_length;
2207 iscsit_increment_maxcmdsn(cmd, conn->sess);
2208 cmd->stat_sn = conn->stat_sn++;
2209 } else { 2243 } else {
2210 sg_off = cmd->write_data_done / PAGE_SIZE; 2244 sg_off = cmd->write_data_done / PAGE_SIZE;
2211 data_left = se_cmd->data_length - cmd->write_data_done; 2245 data_left = se_cmd->data_length - cmd->write_data_done;
@@ -2259,18 +2293,26 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2259 data_len = min(data_left, rdma_write_max); 2293 data_len = min(data_left, rdma_write_max);
2260 wr->cur_rdma_length = data_len; 2294 wr->cur_rdma_length = data_len;
2261 2295
2262 spin_lock_irqsave(&isert_conn->conn_lock, flags); 2296 /* if there is a single dma entry, dma mr is sufficient */
2263 fr_desc = list_first_entry(&isert_conn->conn_frwr_pool, 2297 if (count == 1) {
2264 struct fast_reg_descriptor, list); 2298 ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
2265 list_del(&fr_desc->list); 2299 ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
2266 spin_unlock_irqrestore(&isert_conn->conn_lock, flags); 2300 ib_sge->lkey = isert_conn->conn_mr->lkey;
2267 wr->fr_desc = fr_desc; 2301 wr->fr_desc = NULL;
2302 } else {
2303 spin_lock_irqsave(&isert_conn->conn_lock, flags);
2304 fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
2305 struct fast_reg_descriptor, list);
2306 list_del(&fr_desc->list);
2307 spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
2308 wr->fr_desc = fr_desc;
2268 2309
2269 ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn, 2310 ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
2270 ib_sge, offset, data_len); 2311 ib_sge, offset, data_len);
2271 if (ret) { 2312 if (ret) {
2272 list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool); 2313 list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
2273 goto unmap_sg; 2314 goto unmap_sg;
2315 }
2274 } 2316 }
2275 2317
2276 return 0; 2318 return 0;
@@ -2306,10 +2348,11 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2306 * Build isert_conn->tx_desc for iSCSI response PDU and attach 2348 * Build isert_conn->tx_desc for iSCSI response PDU and attach
2307 */ 2349 */
2308 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2350 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2309 iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *) 2351 iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
2310 &isert_cmd->tx_desc.iscsi_header); 2352 &isert_cmd->tx_desc.iscsi_header);
2311 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2353 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2312 isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr); 2354 isert_init_send_wr(isert_conn, isert_cmd,
2355 &isert_cmd->tx_desc.send_wr, true);
2313 2356
2314 atomic_inc(&isert_conn->post_send_buf_count); 2357 atomic_inc(&isert_conn->post_send_buf_count);
2315 2358
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 631f2090f0b8..691f90ff2d83 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -43,6 +43,8 @@ struct iser_tx_desc {
43 struct ib_sge tx_sg[2]; 43 struct ib_sge tx_sg[2];
44 int num_sge; 44 int num_sge;
45 struct isert_cmd *isert_cmd; 45 struct isert_cmd *isert_cmd;
46 struct llist_node *comp_llnode_batch;
47 struct llist_node comp_llnode;
46 struct ib_send_wr send_wr; 48 struct ib_send_wr send_wr;
47} __packed; 49} __packed;
48 50
@@ -121,6 +123,10 @@ struct isert_conn {
121 int conn_frwr_pool_size; 123 int conn_frwr_pool_size;
122 /* lock to protect frwr_pool */ 124 /* lock to protect frwr_pool */
123 spinlock_t conn_lock; 125 spinlock_t conn_lock;
126#define ISERT_COMP_BATCH_COUNT 8
127 int conn_comp_batch;
128 struct llist_head conn_comp_llist;
129 struct mutex conn_comp_mutex;
124}; 130};
125 131
126#define ISERT_MAX_CQ 64 132#define ISERT_MAX_CQ 64
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 6c923c7039a1..520a7e5a490b 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1352,11 +1352,8 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
1352 1352
1353 /* XXX(hch): this is a horrible layering violation.. */ 1353 /* XXX(hch): this is a horrible layering violation.. */
1354 spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags); 1354 spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
1355 ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
1356 ioctx->cmd.transport_state &= ~CMD_T_ACTIVE; 1355 ioctx->cmd.transport_state &= ~CMD_T_ACTIVE;
1357 spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags); 1356 spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
1358
1359 complete(&ioctx->cmd.transport_lun_stop_comp);
1360 break; 1357 break;
1361 case SRPT_STATE_CMD_RSP_SENT: 1358 case SRPT_STATE_CMD_RSP_SENT:
1362 /* 1359 /*
@@ -1364,9 +1361,6 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
1364 * not been received in time. 1361 * not been received in time.
1365 */ 1362 */
1366 srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx); 1363 srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
1367 spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
1368 ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
1369 spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
1370 target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd); 1364 target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
1371 break; 1365 break;
1372 case SRPT_STATE_MGMT_RSP_SENT: 1366 case SRPT_STATE_MGMT_RSP_SENT:
@@ -1476,7 +1470,6 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
1476{ 1470{
1477 struct se_cmd *cmd; 1471 struct se_cmd *cmd;
1478 enum srpt_command_state state; 1472 enum srpt_command_state state;
1479 unsigned long flags;
1480 1473
1481 cmd = &ioctx->cmd; 1474 cmd = &ioctx->cmd;
1482 state = srpt_get_cmd_state(ioctx); 1475 state = srpt_get_cmd_state(ioctx);
@@ -1496,9 +1489,6 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
1496 __func__, __LINE__, state); 1489 __func__, __LINE__, state);
1497 break; 1490 break;
1498 case SRPT_RDMA_WRITE_LAST: 1491 case SRPT_RDMA_WRITE_LAST:
1499 spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
1500 ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
1501 spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
1502 break; 1492 break;
1503 default: 1493 default:
1504 printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__, 1494 printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index dbd2047f1641..3ed23513d881 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -536,7 +536,8 @@ static int adp5588_probe(struct i2c_client *client,
536 __set_bit(EV_REP, input->evbit); 536 __set_bit(EV_REP, input->evbit);
537 537
538 for (i = 0; i < input->keycodemax; i++) 538 for (i = 0; i < input->keycodemax; i++)
539 __set_bit(kpad->keycode[i] & KEY_MAX, input->keybit); 539 if (kpad->keycode[i] <= KEY_MAX)
540 __set_bit(kpad->keycode[i], input->keybit);
540 __clear_bit(KEY_RESERVED, input->keybit); 541 __clear_bit(KEY_RESERVED, input->keybit);
541 542
542 if (kpad->gpimapsize) 543 if (kpad->gpimapsize)
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 67d12b3427c9..60dafd4fa692 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -992,7 +992,8 @@ static int adp5589_probe(struct i2c_client *client,
992 __set_bit(EV_REP, input->evbit); 992 __set_bit(EV_REP, input->evbit);
993 993
994 for (i = 0; i < input->keycodemax; i++) 994 for (i = 0; i < input->keycodemax; i++)
995 __set_bit(kpad->keycode[i] & KEY_MAX, input->keybit); 995 if (kpad->keycode[i] <= KEY_MAX)
996 __set_bit(kpad->keycode[i], input->keybit);
996 __clear_bit(KEY_RESERVED, input->keybit); 997 __clear_bit(KEY_RESERVED, input->keybit);
997 998
998 if (kpad->gpimapsize) 999 if (kpad->gpimapsize)
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c
index fc88fb48d70d..09b91d093087 100644
--- a/drivers/input/keyboard/bf54x-keys.c
+++ b/drivers/input/keyboard/bf54x-keys.c
@@ -289,7 +289,8 @@ static int bfin_kpad_probe(struct platform_device *pdev)
289 __set_bit(EV_REP, input->evbit); 289 __set_bit(EV_REP, input->evbit);
290 290
291 for (i = 0; i < input->keycodemax; i++) 291 for (i = 0; i < input->keycodemax; i++)
292 __set_bit(bf54x_kpad->keycode[i] & KEY_MAX, input->keybit); 292 if (bf54x_kpad->keycode[i] <= KEY_MAX)
293 __set_bit(bf54x_kpad->keycode[i], input->keybit);
293 __clear_bit(KEY_RESERVED, input->keybit); 294 __clear_bit(KEY_RESERVED, input->keybit);
294 295
295 error = input_register_device(input); 296 error = input_register_device(input);
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index 0735de3a6468..1cb1da294419 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -158,7 +158,7 @@
158 158
159/* ORIENT ADXL346 only */ 159/* ORIENT ADXL346 only */
160#define ADXL346_2D_VALID (1 << 6) 160#define ADXL346_2D_VALID (1 << 6)
161#define ADXL346_2D_ORIENT(x) (((x) & 0x3) >> 4) 161#define ADXL346_2D_ORIENT(x) (((x) & 0x30) >> 4)
162#define ADXL346_3D_VALID (1 << 3) 162#define ADXL346_3D_VALID (1 << 3)
163#define ADXL346_3D_ORIENT(x) ((x) & 0x7) 163#define ADXL346_3D_ORIENT(x) ((x) & 0x7)
164#define ADXL346_2D_PORTRAIT_POS 0 /* +X */ 164#define ADXL346_2D_PORTRAIT_POS 0 /* +X */
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index 86b822806e95..45e0e3e55de2 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -180,7 +180,10 @@ static int64_t hp_sdc_rtc_read_i8042timer (uint8_t loadcmd, int numreg)
180 if (WARN_ON(down_interruptible(&i8042tregs))) 180 if (WARN_ON(down_interruptible(&i8042tregs)))
181 return -1; 181 return -1;
182 182
183 if (hp_sdc_enqueue_transaction(&t)) return -1; 183 if (hp_sdc_enqueue_transaction(&t)) {
184 up(&i8042tregs);
185 return -1;
186 }
184 187
185 /* Sleep until results come back. */ 188 /* Sleep until results come back. */
186 if (WARN_ON(down_interruptible(&i8042tregs))) 189 if (WARN_ON(down_interruptible(&i8042tregs)))
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index e37392976fdd..0deca5a3c87f 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -113,9 +113,12 @@ static int pcf8574_kp_probe(struct i2c_client *client, const struct i2c_device_i
113 idev->keycodemax = ARRAY_SIZE(lp->btncode); 113 idev->keycodemax = ARRAY_SIZE(lp->btncode);
114 114
115 for (i = 0; i < ARRAY_SIZE(pcf8574_kp_btncode); i++) { 115 for (i = 0; i < ARRAY_SIZE(pcf8574_kp_btncode); i++) {
116 lp->btncode[i] = pcf8574_kp_btncode[i]; 116 if (lp->btncode[i] <= KEY_MAX) {
117 __set_bit(lp->btncode[i] & KEY_MAX, idev->keybit); 117 lp->btncode[i] = pcf8574_kp_btncode[i];
118 __set_bit(lp->btncode[i], idev->keybit);
119 }
118 } 120 }
121 __clear_bit(KEY_RESERVED, idev->keybit);
119 122
120 sprintf(lp->name, DRV_NAME); 123 sprintf(lp->name, DRV_NAME);
121 sprintf(lp->phys, "kp_data/input0"); 124 sprintf(lp->phys, "kp_data/input0");
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index ca7a26f1dce8..5cf62e315218 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -70,6 +70,25 @@ static const struct alps_nibble_commands alps_v4_nibble_commands[] = {
70 { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */ 70 { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */
71}; 71};
72 72
73static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
74 { PSMOUSE_CMD_ENABLE, 0x00 }, /* 0 */
75 { PSMOUSE_CMD_SETRATE, 0x0a }, /* 1 */
76 { PSMOUSE_CMD_SETRATE, 0x14 }, /* 2 */
77 { PSMOUSE_CMD_SETRATE, 0x28 }, /* 3 */
78 { PSMOUSE_CMD_SETRATE, 0x3c }, /* 4 */
79 { PSMOUSE_CMD_SETRATE, 0x50 }, /* 5 */
80 { PSMOUSE_CMD_SETRATE, 0x64 }, /* 6 */
81 { PSMOUSE_CMD_SETRATE, 0xc8 }, /* 7 */
82 { PSMOUSE_CMD_GETID, 0x00 }, /* 8 */
83 { PSMOUSE_CMD_GETINFO, 0x00 }, /* 9 */
84 { PSMOUSE_CMD_SETRES, 0x00 }, /* a */
85 { PSMOUSE_CMD_SETRES, 0x01 }, /* b */
86 { PSMOUSE_CMD_SETRES, 0x02 }, /* c */
87 { PSMOUSE_CMD_SETRES, 0x03 }, /* d */
88 { PSMOUSE_CMD_SETSCALE21, 0x00 }, /* e */
89 { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */
90};
91
73 92
74#define ALPS_DUALPOINT 0x02 /* touchpad has trackstick */ 93#define ALPS_DUALPOINT 0x02 /* touchpad has trackstick */
75#define ALPS_PASS 0x04 /* device has a pass-through port */ 94#define ALPS_PASS 0x04 /* device has a pass-through port */
@@ -103,6 +122,7 @@ static const struct alps_model_info alps_model_data[] = {
103 /* Dell Latitude E5500, E6400, E6500, Precision M4400 */ 122 /* Dell Latitude E5500, E6400, E6500, Precision M4400 */
104 { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, 123 { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
105 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, 124 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
125 { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V6, 0xff, 0xff, ALPS_DUALPOINT }, /* Dell XT2 */
106 { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ 126 { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
107 { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, 127 { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
108 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ 128 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
@@ -645,6 +665,76 @@ static void alps_process_packet_v3(struct psmouse *psmouse)
645 alps_process_touchpad_packet_v3(psmouse); 665 alps_process_touchpad_packet_v3(psmouse);
646} 666}
647 667
668static void alps_process_packet_v6(struct psmouse *psmouse)
669{
670 struct alps_data *priv = psmouse->private;
671 unsigned char *packet = psmouse->packet;
672 struct input_dev *dev = psmouse->dev;
673 struct input_dev *dev2 = priv->dev2;
674 int x, y, z, left, right, middle;
675
676 /*
677 * We can use Byte5 to distinguish if the packet is from Touchpad
678 * or Trackpoint.
679 * Touchpad: 0 - 0x7E
680 * Trackpoint: 0x7F
681 */
682 if (packet[5] == 0x7F) {
683 /* It should be a DualPoint when received Trackpoint packet */
684 if (!(priv->flags & ALPS_DUALPOINT))
685 return;
686
687 /* Trackpoint packet */
688 x = packet[1] | ((packet[3] & 0x20) << 2);
689 y = packet[2] | ((packet[3] & 0x40) << 1);
690 z = packet[4];
691 left = packet[3] & 0x01;
692 right = packet[3] & 0x02;
693 middle = packet[3] & 0x04;
694
695 /* To prevent the cursor jump when finger lifted */
696 if (x == 0x7F && y == 0x7F && z == 0x7F)
697 x = y = z = 0;
698
699 /* Divide 4 since trackpoint's speed is too fast */
700 input_report_rel(dev2, REL_X, (char)x / 4);
701 input_report_rel(dev2, REL_Y, -((char)y / 4));
702
703 input_report_key(dev2, BTN_LEFT, left);
704 input_report_key(dev2, BTN_RIGHT, right);
705 input_report_key(dev2, BTN_MIDDLE, middle);
706
707 input_sync(dev2);
708 return;
709 }
710
711 /* Touchpad packet */
712 x = packet[1] | ((packet[3] & 0x78) << 4);
713 y = packet[2] | ((packet[4] & 0x78) << 4);
714 z = packet[5];
715 left = packet[3] & 0x01;
716 right = packet[3] & 0x02;
717
718 if (z > 30)
719 input_report_key(dev, BTN_TOUCH, 1);
720 if (z < 25)
721 input_report_key(dev, BTN_TOUCH, 0);
722
723 if (z > 0) {
724 input_report_abs(dev, ABS_X, x);
725 input_report_abs(dev, ABS_Y, y);
726 }
727
728 input_report_abs(dev, ABS_PRESSURE, z);
729 input_report_key(dev, BTN_TOOL_FINGER, z > 0);
730
731 /* v6 touchpad does not have middle button */
732 input_report_key(dev, BTN_LEFT, left);
733 input_report_key(dev, BTN_RIGHT, right);
734
735 input_sync(dev);
736}
737
648static void alps_process_packet_v4(struct psmouse *psmouse) 738static void alps_process_packet_v4(struct psmouse *psmouse)
649{ 739{
650 struct alps_data *priv = psmouse->private; 740 struct alps_data *priv = psmouse->private;
@@ -897,7 +987,7 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
897 } 987 }
898 988
899 /* Bytes 2 - pktsize should have 0 in the highest bit */ 989 /* Bytes 2 - pktsize should have 0 in the highest bit */
900 if (priv->proto_version != ALPS_PROTO_V5 && 990 if ((priv->proto_version < ALPS_PROTO_V5) &&
901 psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize && 991 psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize &&
902 (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) { 992 (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
903 psmouse_dbg(psmouse, "refusing packet[%i] = %x\n", 993 psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
@@ -1085,6 +1175,80 @@ static int alps_absolute_mode_v1_v2(struct psmouse *psmouse)
1085 return ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETPOLL); 1175 return ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETPOLL);
1086} 1176}
1087 1177
1178static int alps_monitor_mode_send_word(struct psmouse *psmouse, u16 word)
1179{
1180 int i, nibble;
1181
1182 /*
1183 * b0-b11 are valid bits, send sequence is inverse.
1184 * e.g. when word = 0x0123, nibble send sequence is 3, 2, 1
1185 */
1186 for (i = 0; i <= 8; i += 4) {
1187 nibble = (word >> i) & 0xf;
1188 if (alps_command_mode_send_nibble(psmouse, nibble))
1189 return -1;
1190 }
1191
1192 return 0;
1193}
1194
1195static int alps_monitor_mode_write_reg(struct psmouse *psmouse,
1196 u16 addr, u16 value)
1197{
1198 struct ps2dev *ps2dev = &psmouse->ps2dev;
1199
1200 /* 0x0A0 is the command to write the word */
1201 if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE) ||
1202 alps_monitor_mode_send_word(psmouse, 0x0A0) ||
1203 alps_monitor_mode_send_word(psmouse, addr) ||
1204 alps_monitor_mode_send_word(psmouse, value) ||
1205 ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE))
1206 return -1;
1207
1208 return 0;
1209}
1210
1211static int alps_monitor_mode(struct psmouse *psmouse, bool enable)
1212{
1213 struct ps2dev *ps2dev = &psmouse->ps2dev;
1214
1215 if (enable) {
1216 /* EC E9 F5 F5 E7 E6 E7 E9 to enter monitor mode */
1217 if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
1218 ps2_command(ps2dev, NULL, PSMOUSE_CMD_GETINFO) ||
1219 ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
1220 ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
1221 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
1222 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
1223 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
1224 ps2_command(ps2dev, NULL, PSMOUSE_CMD_GETINFO))
1225 return -1;
1226 } else {
1227 /* EC to exit monitor mode */
1228 if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP))
1229 return -1;
1230 }
1231
1232 return 0;
1233}
1234
1235static int alps_absolute_mode_v6(struct psmouse *psmouse)
1236{
1237 u16 reg_val = 0x181;
1238 int ret = -1;
1239
1240 /* enter monitor mode, to write the register */
1241 if (alps_monitor_mode(psmouse, true))
1242 return -1;
1243
1244 ret = alps_monitor_mode_write_reg(psmouse, 0x000, reg_val);
1245
1246 if (alps_monitor_mode(psmouse, false))
1247 ret = -1;
1248
1249 return ret;
1250}
1251
1088static int alps_get_status(struct psmouse *psmouse, char *param) 1252static int alps_get_status(struct psmouse *psmouse, char *param)
1089{ 1253{
1090 /* Get status: 0xF5 0xF5 0xF5 0xE9 */ 1254 /* Get status: 0xF5 0xF5 0xF5 0xE9 */
@@ -1189,6 +1353,32 @@ static int alps_hw_init_v1_v2(struct psmouse *psmouse)
1189 return 0; 1353 return 0;
1190} 1354}
1191 1355
1356static int alps_hw_init_v6(struct psmouse *psmouse)
1357{
1358 unsigned char param[2] = {0xC8, 0x14};
1359
1360 /* Enter passthrough mode to let trackpoint enter 6byte raw mode */
1361 if (alps_passthrough_mode_v2(psmouse, true))
1362 return -1;
1363
1364 if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
1365 ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
1366 ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
1367 ps2_command(&psmouse->ps2dev, &param[0], PSMOUSE_CMD_SETRATE) ||
1368 ps2_command(&psmouse->ps2dev, &param[1], PSMOUSE_CMD_SETRATE))
1369 return -1;
1370
1371 if (alps_passthrough_mode_v2(psmouse, false))
1372 return -1;
1373
1374 if (alps_absolute_mode_v6(psmouse)) {
1375 psmouse_err(psmouse, "Failed to enable absolute mode\n");
1376 return -1;
1377 }
1378
1379 return 0;
1380}
1381
1192/* 1382/*
1193 * Enable or disable passthrough mode to the trackstick. 1383 * Enable or disable passthrough mode to the trackstick.
1194 */ 1384 */
@@ -1553,6 +1743,8 @@ static void alps_set_defaults(struct alps_data *priv)
1553 priv->hw_init = alps_hw_init_v1_v2; 1743 priv->hw_init = alps_hw_init_v1_v2;
1554 priv->process_packet = alps_process_packet_v1_v2; 1744 priv->process_packet = alps_process_packet_v1_v2;
1555 priv->set_abs_params = alps_set_abs_params_st; 1745 priv->set_abs_params = alps_set_abs_params_st;
1746 priv->x_max = 1023;
1747 priv->y_max = 767;
1556 break; 1748 break;
1557 case ALPS_PROTO_V3: 1749 case ALPS_PROTO_V3:
1558 priv->hw_init = alps_hw_init_v3; 1750 priv->hw_init = alps_hw_init_v3;
@@ -1584,6 +1776,14 @@ static void alps_set_defaults(struct alps_data *priv)
1584 priv->x_bits = 23; 1776 priv->x_bits = 23;
1585 priv->y_bits = 12; 1777 priv->y_bits = 12;
1586 break; 1778 break;
1779 case ALPS_PROTO_V6:
1780 priv->hw_init = alps_hw_init_v6;
1781 priv->process_packet = alps_process_packet_v6;
1782 priv->set_abs_params = alps_set_abs_params_st;
1783 priv->nibble_commands = alps_v6_nibble_commands;
1784 priv->x_max = 2047;
1785 priv->y_max = 1535;
1786 break;
1587 } 1787 }
1588} 1788}
1589 1789
@@ -1705,8 +1905,8 @@ static void alps_disconnect(struct psmouse *psmouse)
1705static void alps_set_abs_params_st(struct alps_data *priv, 1905static void alps_set_abs_params_st(struct alps_data *priv,
1706 struct input_dev *dev1) 1906 struct input_dev *dev1)
1707{ 1907{
1708 input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0); 1908 input_set_abs_params(dev1, ABS_X, 0, priv->x_max, 0, 0);
1709 input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0); 1909 input_set_abs_params(dev1, ABS_Y, 0, priv->y_max, 0, 0);
1710} 1910}
1711 1911
1712static void alps_set_abs_params_mt(struct alps_data *priv, 1912static void alps_set_abs_params_mt(struct alps_data *priv,
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index eee59853b9ce..704f0f924307 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -17,6 +17,7 @@
17#define ALPS_PROTO_V3 3 17#define ALPS_PROTO_V3 3
18#define ALPS_PROTO_V4 4 18#define ALPS_PROTO_V4 4
19#define ALPS_PROTO_V5 5 19#define ALPS_PROTO_V5 5
20#define ALPS_PROTO_V6 6
20 21
21/** 22/**
22 * struct alps_model_info - touchpad ID table 23 * struct alps_model_info - touchpad ID table
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 8551dcaf24db..597e9b8fc18d 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1313,6 +1313,7 @@ static int elantech_set_properties(struct elantech_data *etd)
1313 break; 1313 break;
1314 case 6: 1314 case 6:
1315 case 7: 1315 case 7:
1316 case 8:
1316 etd->hw_version = 4; 1317 etd->hw_version = 4;
1317 break; 1318 break;
1318 default: 1319 default:
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 98707fb2cb5d..8f4c4ab04bc2 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -455,16 +455,26 @@ static DEVICE_ATTR_RO(type);
455static DEVICE_ATTR_RO(proto); 455static DEVICE_ATTR_RO(proto);
456static DEVICE_ATTR_RO(id); 456static DEVICE_ATTR_RO(id);
457static DEVICE_ATTR_RO(extra); 457static DEVICE_ATTR_RO(extra);
458static DEVICE_ATTR_RO(modalias);
459static DEVICE_ATTR_WO(drvctl);
460static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
461static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
462 458
463static struct attribute *serio_device_id_attrs[] = { 459static struct attribute *serio_device_id_attrs[] = {
464 &dev_attr_type.attr, 460 &dev_attr_type.attr,
465 &dev_attr_proto.attr, 461 &dev_attr_proto.attr,
466 &dev_attr_id.attr, 462 &dev_attr_id.attr,
467 &dev_attr_extra.attr, 463 &dev_attr_extra.attr,
464 NULL
465};
466
467static struct attribute_group serio_id_attr_group = {
468 .name = "id",
469 .attrs = serio_device_id_attrs,
470};
471
472static DEVICE_ATTR_RO(modalias);
473static DEVICE_ATTR_WO(drvctl);
474static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
475static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
476
477static struct attribute *serio_device_attrs[] = {
468 &dev_attr_modalias.attr, 478 &dev_attr_modalias.attr,
469 &dev_attr_description.attr, 479 &dev_attr_description.attr,
470 &dev_attr_drvctl.attr, 480 &dev_attr_drvctl.attr,
@@ -472,13 +482,13 @@ static struct attribute *serio_device_id_attrs[] = {
472 NULL 482 NULL
473}; 483};
474 484
475static struct attribute_group serio_id_attr_group = { 485static struct attribute_group serio_device_attr_group = {
476 .name = "id", 486 .attrs = serio_device_attrs,
477 .attrs = serio_device_id_attrs,
478}; 487};
479 488
480static const struct attribute_group *serio_device_attr_groups[] = { 489static const struct attribute_group *serio_device_attr_groups[] = {
481 &serio_id_attr_group, 490 &serio_id_attr_group,
491 &serio_device_attr_group,
482 NULL 492 NULL
483}; 493};
484 494
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 00d1e547b211..961d58d32647 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -906,6 +906,17 @@ config TOUCHSCREEN_STMPE
906 To compile this driver as a module, choose M here: the 906 To compile this driver as a module, choose M here: the
907 module will be called stmpe-ts. 907 module will be called stmpe-ts.
908 908
909config TOUCHSCREEN_SUR40
910 tristate "Samsung SUR40 (Surface 2.0/PixelSense) touchscreen"
911 depends on USB
912 select INPUT_POLLDEV
913 help
914 Say Y here if you want support for the Samsung SUR40 touchscreen
915 (also known as Microsoft Surface 2.0 or Microsoft PixelSense).
916
917 To compile this driver as a module, choose M here: the
918 module will be called sur40.
919
909config TOUCHSCREEN_TPS6507X 920config TOUCHSCREEN_TPS6507X
910 tristate "TPS6507x based touchscreens" 921 tristate "TPS6507x based touchscreens"
911 depends on I2C 922 depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 7587883b8d38..62801f213346 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_TOUCHSCREEN_PIXCIR) += pixcir_i2c_ts.o
54obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o 54obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
55obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o 55obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
56obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o 56obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
57obj-$(CONFIG_TOUCHSCREEN_SUR40) += sur40.o
57obj-$(CONFIG_TOUCHSCREEN_TI_AM335X_TSC) += ti_am335x_tsc.o 58obj-$(CONFIG_TOUCHSCREEN_TI_AM335X_TSC) += ti_am335x_tsc.o
58obj-$(CONFIG_TOUCHSCREEN_TNETV107X) += tnetv107x-ts.o 59obj-$(CONFIG_TOUCHSCREEN_TNETV107X) += tnetv107x-ts.o
59obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o 60obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
diff --git a/drivers/input/touchscreen/atmel-wm97xx.c b/drivers/input/touchscreen/atmel-wm97xx.c
index 268a35e55d7f..279c0e42b8a7 100644
--- a/drivers/input/touchscreen/atmel-wm97xx.c
+++ b/drivers/input/touchscreen/atmel-wm97xx.c
@@ -391,7 +391,7 @@ static int __exit atmel_wm97xx_remove(struct platform_device *pdev)
391} 391}
392 392
393#ifdef CONFIG_PM_SLEEP 393#ifdef CONFIG_PM_SLEEP
394static int atmel_wm97xx_suspend(struct *dev) 394static int atmel_wm97xx_suspend(struct device *dev)
395{ 395{
396 struct platform_device *pdev = to_platform_device(dev); 396 struct platform_device *pdev = to_platform_device(dev);
397 struct atmel_wm97xx *atmel_wm97xx = platform_get_drvdata(pdev); 397 struct atmel_wm97xx *atmel_wm97xx = platform_get_drvdata(pdev);
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
index 42d830efa316..a035a390f8e2 100644
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ b/drivers/input/touchscreen/cyttsp4_core.c
@@ -1246,8 +1246,7 @@ static void cyttsp4_watchdog_timer(unsigned long handle)
1246 1246
1247 dev_vdbg(cd->dev, "%s: Watchdog timer triggered\n", __func__); 1247 dev_vdbg(cd->dev, "%s: Watchdog timer triggered\n", __func__);
1248 1248
1249 if (!work_pending(&cd->watchdog_work)) 1249 schedule_work(&cd->watchdog_work);
1250 schedule_work(&cd->watchdog_work);
1251 1250
1252 return; 1251 return;
1253} 1252}
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
new file mode 100644
index 000000000000..f1cb05148b46
--- /dev/null
+++ b/drivers/input/touchscreen/sur40.c
@@ -0,0 +1,466 @@
1/*
2 * Surface2.0/SUR40/PixelSense input driver
3 *
4 * Copyright (c) 2013 by Florian 'floe' Echtler <floe@butterbrot.org>
5 *
6 * Derived from the USB Skeleton driver 1.1,
7 * Copyright (c) 2003 Greg Kroah-Hartman (greg@kroah.com)
8 *
9 * and from the Apple USB BCM5974 multitouch driver,
10 * Copyright (c) 2008 Henrik Rydberg (rydberg@euromail.se)
11 *
12 * and from the generic hid-multitouch driver,
13 * Copyright (c) 2010-2012 Stephane Chatty <chatty@enac.fr>
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version.
19 */
20
21#include <linux/kernel.h>
22#include <linux/errno.h>
23#include <linux/delay.h>
24#include <linux/init.h>
25#include <linux/slab.h>
26#include <linux/module.h>
27#include <linux/completion.h>
28#include <linux/uaccess.h>
29#include <linux/usb.h>
30#include <linux/printk.h>
31#include <linux/input-polldev.h>
32#include <linux/input/mt.h>
33#include <linux/usb/input.h>
34
35/* read 512 bytes from endpoint 0x86 -> get header + blobs */
36struct sur40_header {
37
38 __le16 type; /* always 0x0001 */
39 __le16 count; /* count of blobs (if 0: continue prev. packet) */
40
41 __le32 packet_id; /* unique ID for all packets in one frame */
42
43 __le32 timestamp; /* milliseconds (inc. by 16 or 17 each frame) */
44 __le32 unknown; /* "epoch?" always 02/03 00 00 00 */
45
46} __packed;
47
48struct sur40_blob {
49
50 __le16 blob_id;
51
52 u8 action; /* 0x02 = enter/exit, 0x03 = update (?) */
53 u8 unknown; /* always 0x01 or 0x02 (no idea what this is?) */
54
55 __le16 bb_pos_x; /* upper left corner of bounding box */
56 __le16 bb_pos_y;
57
58 __le16 bb_size_x; /* size of bounding box */
59 __le16 bb_size_y;
60
61 __le16 pos_x; /* finger tip position */
62 __le16 pos_y;
63
64 __le16 ctr_x; /* centroid position */
65 __le16 ctr_y;
66
67 __le16 axis_x; /* somehow related to major/minor axis, mostly: */
68 __le16 axis_y; /* axis_x == bb_size_y && axis_y == bb_size_x */
69
70 __le32 angle; /* orientation in radians relative to x axis -
71 actually an IEEE754 float, don't use in kernel */
72
73 __le32 area; /* size in pixels/pressure (?) */
74
75 u8 padding[32];
76
77} __packed;
78
79/* combined header/blob data */
80struct sur40_data {
81 struct sur40_header header;
82 struct sur40_blob blobs[];
83} __packed;
84
85
86/* version information */
87#define DRIVER_SHORT "sur40"
88#define DRIVER_AUTHOR "Florian 'floe' Echtler <floe@butterbrot.org>"
89#define DRIVER_DESC "Surface2.0/SUR40/PixelSense input driver"
90
91/* vendor and device IDs */
92#define ID_MICROSOFT 0x045e
93#define ID_SUR40 0x0775
94
95/* sensor resolution */
96#define SENSOR_RES_X 1920
97#define SENSOR_RES_Y 1080
98
99/* touch data endpoint */
100#define TOUCH_ENDPOINT 0x86
101
102/* polling interval (ms) */
103#define POLL_INTERVAL 10
104
105/* maximum number of contacts FIXME: this is a guess? */
106#define MAX_CONTACTS 64
107
108/* control commands */
109#define SUR40_GET_VERSION 0xb0 /* 12 bytes string */
110#define SUR40_UNKNOWN1 0xb3 /* 5 bytes */
111#define SUR40_UNKNOWN2 0xc1 /* 24 bytes */
112
113#define SUR40_GET_STATE 0xc5 /* 4 bytes state (?) */
114#define SUR40_GET_SENSORS 0xb1 /* 8 bytes sensors */
115
116/*
117 * Note: an earlier, non-public version of this driver used USB_RECIP_ENDPOINT
118 * here by mistake which is very likely to have corrupted the firmware EEPROM
119 * on two separate SUR40 devices. Thanks to Alan Stern who spotted this bug.
120 * Should you ever run into a similar problem, the background story to this
121 * incident and instructions on how to fix the corrupted EEPROM are available
122 * at https://floe.butterbrot.org/matrix/hacking/surface/brick.html
123*/
124
125struct sur40_state {
126
127 struct usb_device *usbdev;
128 struct device *dev;
129 struct input_polled_dev *input;
130
131 struct sur40_data *bulk_in_buffer;
132 size_t bulk_in_size;
133 u8 bulk_in_epaddr;
134
135 char phys[64];
136};
137
138static int sur40_command(struct sur40_state *dev,
139 u8 command, u16 index, void *buffer, u16 size)
140{
141 return usb_control_msg(dev->usbdev, usb_rcvctrlpipe(dev->usbdev, 0),
142 command,
143 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
144 0x00, index, buffer, size, 1000);
145}
146
147/* Initialization routine, called from sur40_open */
148static int sur40_init(struct sur40_state *dev)
149{
150 int result;
151 u8 buffer[24];
152
153 /* stupidly replay the original MS driver init sequence */
154 result = sur40_command(dev, SUR40_GET_VERSION, 0x00, buffer, 12);
155 if (result < 0)
156 return result;
157
158 result = sur40_command(dev, SUR40_GET_VERSION, 0x01, buffer, 12);
159 if (result < 0)
160 return result;
161
162 result = sur40_command(dev, SUR40_GET_VERSION, 0x02, buffer, 12);
163 if (result < 0)
164 return result;
165
166 result = sur40_command(dev, SUR40_UNKNOWN2, 0x00, buffer, 24);
167 if (result < 0)
168 return result;
169
170 result = sur40_command(dev, SUR40_UNKNOWN1, 0x00, buffer, 5);
171 if (result < 0)
172 return result;
173
174 result = sur40_command(dev, SUR40_GET_VERSION, 0x03, buffer, 12);
175
176 /*
177 * Discard the result buffer - no known data inside except
178 * some version strings, maybe extract these sometime...
179 */
180
181 return result;
182}
183
184/*
185 * Callback routines from input_polled_dev
186 */
187
188/* Enable the device, polling will now start. */
189static void sur40_open(struct input_polled_dev *polldev)
190{
191 struct sur40_state *sur40 = polldev->private;
192
193 dev_dbg(sur40->dev, "open\n");
194 sur40_init(sur40);
195}
196
197/* Disable device, polling has stopped. */
198static void sur40_close(struct input_polled_dev *polldev)
199{
200 struct sur40_state *sur40 = polldev->private;
201
202 dev_dbg(sur40->dev, "close\n");
203 /*
204 * There is no known way to stop the device, so we simply
205 * stop polling.
206 */
207}
208
209/*
210 * This function is called when a whole contact has been processed,
211 * so that it can assign it to a slot and store the data there.
212 */
213static void sur40_report_blob(struct sur40_blob *blob, struct input_dev *input)
214{
215 int wide, major, minor;
216
217 int bb_size_x = le16_to_cpu(blob->bb_size_x);
218 int bb_size_y = le16_to_cpu(blob->bb_size_y);
219
220 int pos_x = le16_to_cpu(blob->pos_x);
221 int pos_y = le16_to_cpu(blob->pos_y);
222
223 int ctr_x = le16_to_cpu(blob->ctr_x);
224 int ctr_y = le16_to_cpu(blob->ctr_y);
225
226 int slotnum = input_mt_get_slot_by_key(input, blob->blob_id);
227 if (slotnum < 0 || slotnum >= MAX_CONTACTS)
228 return;
229
230 input_mt_slot(input, slotnum);
231 input_mt_report_slot_state(input, MT_TOOL_FINGER, 1);
232 wide = (bb_size_x > bb_size_y);
233 major = max(bb_size_x, bb_size_y);
234 minor = min(bb_size_x, bb_size_y);
235
236 input_report_abs(input, ABS_MT_POSITION_X, pos_x);
237 input_report_abs(input, ABS_MT_POSITION_Y, pos_y);
238 input_report_abs(input, ABS_MT_TOOL_X, ctr_x);
239 input_report_abs(input, ABS_MT_TOOL_Y, ctr_y);
240
241 /* TODO: use a better orientation measure */
242 input_report_abs(input, ABS_MT_ORIENTATION, wide);
243 input_report_abs(input, ABS_MT_TOUCH_MAJOR, major);
244 input_report_abs(input, ABS_MT_TOUCH_MINOR, minor);
245}
246
247/* core function: poll for new input data */
248static void sur40_poll(struct input_polled_dev *polldev)
249{
250
251 struct sur40_state *sur40 = polldev->private;
252 struct input_dev *input = polldev->input;
253 int result, bulk_read, need_blobs, packet_blobs, i;
254 u32 uninitialized_var(packet_id);
255
256 struct sur40_header *header = &sur40->bulk_in_buffer->header;
257 struct sur40_blob *inblob = &sur40->bulk_in_buffer->blobs[0];
258
259 dev_dbg(sur40->dev, "poll\n");
260
261 need_blobs = -1;
262
263 do {
264
265 /* perform a blocking bulk read to get data from the device */
266 result = usb_bulk_msg(sur40->usbdev,
267 usb_rcvbulkpipe(sur40->usbdev, sur40->bulk_in_epaddr),
268 sur40->bulk_in_buffer, sur40->bulk_in_size,
269 &bulk_read, 1000);
270
271 dev_dbg(sur40->dev, "received %d bytes\n", bulk_read);
272
273 if (result < 0) {
274 dev_err(sur40->dev, "error in usb_bulk_read\n");
275 return;
276 }
277
278 result = bulk_read - sizeof(struct sur40_header);
279
280 if (result % sizeof(struct sur40_blob) != 0) {
281 dev_err(sur40->dev, "transfer size mismatch\n");
282 return;
283 }
284
285 /* first packet? */
286 if (need_blobs == -1) {
287 need_blobs = le16_to_cpu(header->count);
288 dev_dbg(sur40->dev, "need %d blobs\n", need_blobs);
289 packet_id = le32_to_cpu(header->packet_id);
290 }
291
292 /*
293 * Sanity check. when video data is also being retrieved, the
294 * packet ID will usually increase in the middle of a series
295 * instead of at the end.
296 */
297 if (packet_id != header->packet_id)
298 dev_warn(sur40->dev, "packet ID mismatch\n");
299
300 packet_blobs = result / sizeof(struct sur40_blob);
301 dev_dbg(sur40->dev, "received %d blobs\n", packet_blobs);
302
303 /* packets always contain at least 4 blobs, even if empty */
304 if (packet_blobs > need_blobs)
305 packet_blobs = need_blobs;
306
307 for (i = 0; i < packet_blobs; i++) {
308 need_blobs--;
309 dev_dbg(sur40->dev, "processing blob\n");
310 sur40_report_blob(&(inblob[i]), input);
311 }
312
313 } while (need_blobs > 0);
314
315 input_mt_sync_frame(input);
316 input_sync(input);
317}
318
319/* Initialize input device parameters. */
320static void sur40_input_setup(struct input_dev *input_dev)
321{
322 __set_bit(EV_KEY, input_dev->evbit);
323 __set_bit(EV_ABS, input_dev->evbit);
324
325 input_set_abs_params(input_dev, ABS_MT_POSITION_X,
326 0, SENSOR_RES_X, 0, 0);
327 input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
328 0, SENSOR_RES_Y, 0, 0);
329
330 input_set_abs_params(input_dev, ABS_MT_TOOL_X,
331 0, SENSOR_RES_X, 0, 0);
332 input_set_abs_params(input_dev, ABS_MT_TOOL_Y,
333 0, SENSOR_RES_Y, 0, 0);
334
335 /* max value unknown, but major/minor axis
336 * can never be larger than screen */
337 input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
338 0, SENSOR_RES_X, 0, 0);
339 input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR,
340 0, SENSOR_RES_Y, 0, 0);
341
342 input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
343
344 input_mt_init_slots(input_dev, MAX_CONTACTS,
345 INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
346}
347
348/* Check candidate USB interface. */
349static int sur40_probe(struct usb_interface *interface,
350 const struct usb_device_id *id)
351{
352 struct usb_device *usbdev = interface_to_usbdev(interface);
353 struct sur40_state *sur40;
354 struct usb_host_interface *iface_desc;
355 struct usb_endpoint_descriptor *endpoint;
356 struct input_polled_dev *poll_dev;
357 int error;
358
359 /* Check if we really have the right interface. */
360 iface_desc = &interface->altsetting[0];
361 if (iface_desc->desc.bInterfaceClass != 0xFF)
362 return -ENODEV;
363
364 /* Use endpoint #4 (0x86). */
365 endpoint = &iface_desc->endpoint[4].desc;
366 if (endpoint->bEndpointAddress != TOUCH_ENDPOINT)
367 return -ENODEV;
368
369 /* Allocate memory for our device state and initialize it. */
370 sur40 = kzalloc(sizeof(struct sur40_state), GFP_KERNEL);
371 if (!sur40)
372 return -ENOMEM;
373
374 poll_dev = input_allocate_polled_device();
375 if (!poll_dev) {
376 error = -ENOMEM;
377 goto err_free_dev;
378 }
379
380 /* Set up polled input device control structure */
381 poll_dev->private = sur40;
382 poll_dev->poll_interval = POLL_INTERVAL;
383 poll_dev->open = sur40_open;
384 poll_dev->poll = sur40_poll;
385 poll_dev->close = sur40_close;
386
387 /* Set up regular input device structure */
388 sur40_input_setup(poll_dev->input);
389
390 poll_dev->input->name = "Samsung SUR40";
391 usb_to_input_id(usbdev, &poll_dev->input->id);
392 usb_make_path(usbdev, sur40->phys, sizeof(sur40->phys));
393 strlcat(sur40->phys, "/input0", sizeof(sur40->phys));
394 poll_dev->input->phys = sur40->phys;
395 poll_dev->input->dev.parent = &interface->dev;
396
397 sur40->usbdev = usbdev;
398 sur40->dev = &interface->dev;
399 sur40->input = poll_dev;
400
401 /* use the bulk-in endpoint tested above */
402 sur40->bulk_in_size = usb_endpoint_maxp(endpoint);
403 sur40->bulk_in_epaddr = endpoint->bEndpointAddress;
404 sur40->bulk_in_buffer = kmalloc(sur40->bulk_in_size, GFP_KERNEL);
405 if (!sur40->bulk_in_buffer) {
406 dev_err(&interface->dev, "Unable to allocate input buffer.");
407 error = -ENOMEM;
408 goto err_free_polldev;
409 }
410
411 error = input_register_polled_device(poll_dev);
412 if (error) {
413 dev_err(&interface->dev,
414 "Unable to register polled input device.");
415 goto err_free_buffer;
416 }
417
418 /* we can register the device now, as it is ready */
419 usb_set_intfdata(interface, sur40);
420 dev_dbg(&interface->dev, "%s is now attached\n", DRIVER_DESC);
421
422 return 0;
423
424err_free_buffer:
425 kfree(sur40->bulk_in_buffer);
426err_free_polldev:
427 input_free_polled_device(sur40->input);
428err_free_dev:
429 kfree(sur40);
430
431 return error;
432}
433
434/* Unregister device & clean up. */
435static void sur40_disconnect(struct usb_interface *interface)
436{
437 struct sur40_state *sur40 = usb_get_intfdata(interface);
438
439 input_unregister_polled_device(sur40->input);
440 input_free_polled_device(sur40->input);
441 kfree(sur40->bulk_in_buffer);
442 kfree(sur40);
443
444 usb_set_intfdata(interface, NULL);
445 dev_dbg(&interface->dev, "%s is now disconnected\n", DRIVER_DESC);
446}
447
448static const struct usb_device_id sur40_table[] = {
449 { USB_DEVICE(ID_MICROSOFT, ID_SUR40) }, /* Samsung SUR40 */
450 { } /* terminating null entry */
451};
452MODULE_DEVICE_TABLE(usb, sur40_table);
453
454/* USB-specific object needed to register this driver with the USB subsystem. */
455static struct usb_driver sur40_driver = {
456 .name = DRIVER_SHORT,
457 .probe = sur40_probe,
458 .disconnect = sur40_disconnect,
459 .id_table = sur40_table,
460};
461
462module_usb_driver(sur40_driver);
463
464MODULE_AUTHOR(DRIVER_AUTHOR);
465MODULE_DESCRIPTION(DRIVER_DESC);
466MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index ae4b6b903629..5f87bed05467 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -106,6 +106,7 @@ struct usbtouch_device_info {
106struct usbtouch_usb { 106struct usbtouch_usb {
107 unsigned char *data; 107 unsigned char *data;
108 dma_addr_t data_dma; 108 dma_addr_t data_dma;
109 int data_size;
109 unsigned char *buffer; 110 unsigned char *buffer;
110 int buf_len; 111 int buf_len;
111 struct urb *irq; 112 struct urb *irq;
@@ -1521,7 +1522,7 @@ static int usbtouch_reset_resume(struct usb_interface *intf)
1521static void usbtouch_free_buffers(struct usb_device *udev, 1522static void usbtouch_free_buffers(struct usb_device *udev,
1522 struct usbtouch_usb *usbtouch) 1523 struct usbtouch_usb *usbtouch)
1523{ 1524{
1524 usb_free_coherent(udev, usbtouch->type->rept_size, 1525 usb_free_coherent(udev, usbtouch->data_size,
1525 usbtouch->data, usbtouch->data_dma); 1526 usbtouch->data, usbtouch->data_dma);
1526 kfree(usbtouch->buffer); 1527 kfree(usbtouch->buffer);
1527} 1528}
@@ -1566,7 +1567,20 @@ static int usbtouch_probe(struct usb_interface *intf,
1566 if (!type->process_pkt) 1567 if (!type->process_pkt)
1567 type->process_pkt = usbtouch_process_pkt; 1568 type->process_pkt = usbtouch_process_pkt;
1568 1569
1569 usbtouch->data = usb_alloc_coherent(udev, type->rept_size, 1570 usbtouch->data_size = type->rept_size;
1571 if (type->get_pkt_len) {
1572 /*
1573 * When dealing with variable-length packets we should
1574 * not request more than wMaxPacketSize bytes at once
1575 * as we do not know if there is more data coming or
1576 * we filled exactly wMaxPacketSize bytes and there is
1577 * nothing else.
1578 */
1579 usbtouch->data_size = min(usbtouch->data_size,
1580 usb_endpoint_maxp(endpoint));
1581 }
1582
1583 usbtouch->data = usb_alloc_coherent(udev, usbtouch->data_size,
1570 GFP_KERNEL, &usbtouch->data_dma); 1584 GFP_KERNEL, &usbtouch->data_dma);
1571 if (!usbtouch->data) 1585 if (!usbtouch->data)
1572 goto out_free; 1586 goto out_free;
@@ -1626,12 +1640,12 @@ static int usbtouch_probe(struct usb_interface *intf,
1626 if (usb_endpoint_type(endpoint) == USB_ENDPOINT_XFER_INT) 1640 if (usb_endpoint_type(endpoint) == USB_ENDPOINT_XFER_INT)
1627 usb_fill_int_urb(usbtouch->irq, udev, 1641 usb_fill_int_urb(usbtouch->irq, udev,
1628 usb_rcvintpipe(udev, endpoint->bEndpointAddress), 1642 usb_rcvintpipe(udev, endpoint->bEndpointAddress),
1629 usbtouch->data, type->rept_size, 1643 usbtouch->data, usbtouch->data_size,
1630 usbtouch_irq, usbtouch, endpoint->bInterval); 1644 usbtouch_irq, usbtouch, endpoint->bInterval);
1631 else 1645 else
1632 usb_fill_bulk_urb(usbtouch->irq, udev, 1646 usb_fill_bulk_urb(usbtouch->irq, udev,
1633 usb_rcvbulkpipe(udev, endpoint->bEndpointAddress), 1647 usb_rcvbulkpipe(udev, endpoint->bEndpointAddress),
1634 usbtouch->data, type->rept_size, 1648 usbtouch->data, usbtouch->data_size,
1635 usbtouch_irq, usbtouch); 1649 usbtouch_irq, usbtouch);
1636 1650
1637 usbtouch->irq->dev = udev; 1651 usbtouch->irq->dev = udev;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 1abfb5684ab7..e46a88700b68 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -392,7 +392,7 @@ struct arm_smmu_domain {
392 struct arm_smmu_cfg root_cfg; 392 struct arm_smmu_cfg root_cfg;
393 phys_addr_t output_mask; 393 phys_addr_t output_mask;
394 394
395 spinlock_t lock; 395 struct mutex lock;
396}; 396};
397 397
398static DEFINE_SPINLOCK(arm_smmu_devices_lock); 398static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -900,7 +900,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
900 goto out_free_domain; 900 goto out_free_domain;
901 smmu_domain->root_cfg.pgd = pgd; 901 smmu_domain->root_cfg.pgd = pgd;
902 902
903 spin_lock_init(&smmu_domain->lock); 903 mutex_init(&smmu_domain->lock);
904 domain->priv = smmu_domain; 904 domain->priv = smmu_domain;
905 return 0; 905 return 0;
906 906
@@ -1137,7 +1137,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1137 * Sanity check the domain. We don't currently support domains 1137 * Sanity check the domain. We don't currently support domains
1138 * that cross between different SMMU chains. 1138 * that cross between different SMMU chains.
1139 */ 1139 */
1140 spin_lock(&smmu_domain->lock); 1140 mutex_lock(&smmu_domain->lock);
1141 if (!smmu_domain->leaf_smmu) { 1141 if (!smmu_domain->leaf_smmu) {
1142 /* Now that we have a master, we can finalise the domain */ 1142 /* Now that we have a master, we can finalise the domain */
1143 ret = arm_smmu_init_domain_context(domain, dev); 1143 ret = arm_smmu_init_domain_context(domain, dev);
@@ -1152,7 +1152,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1152 dev_name(device_smmu->dev)); 1152 dev_name(device_smmu->dev));
1153 goto err_unlock; 1153 goto err_unlock;
1154 } 1154 }
1155 spin_unlock(&smmu_domain->lock); 1155 mutex_unlock(&smmu_domain->lock);
1156 1156
1157 /* Looks ok, so add the device to the domain */ 1157 /* Looks ok, so add the device to the domain */
1158 master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); 1158 master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
@@ -1162,7 +1162,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1162 return arm_smmu_domain_add_master(smmu_domain, master); 1162 return arm_smmu_domain_add_master(smmu_domain, master);
1163 1163
1164err_unlock: 1164err_unlock:
1165 spin_unlock(&smmu_domain->lock); 1165 mutex_unlock(&smmu_domain->lock);
1166 return ret; 1166 return ret;
1167} 1167}
1168 1168
@@ -1394,7 +1394,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
1394 if (paddr & ~output_mask) 1394 if (paddr & ~output_mask)
1395 return -ERANGE; 1395 return -ERANGE;
1396 1396
1397 spin_lock(&smmu_domain->lock); 1397 mutex_lock(&smmu_domain->lock);
1398 pgd += pgd_index(iova); 1398 pgd += pgd_index(iova);
1399 end = iova + size; 1399 end = iova + size;
1400 do { 1400 do {
@@ -1410,7 +1410,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
1410 } while (pgd++, iova != end); 1410 } while (pgd++, iova != end);
1411 1411
1412out_unlock: 1412out_unlock:
1413 spin_unlock(&smmu_domain->lock); 1413 mutex_unlock(&smmu_domain->lock);
1414 1414
1415 /* Ensure new page tables are visible to the hardware walker */ 1415 /* Ensure new page tables are visible to the hardware walker */
1416 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) 1416 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
@@ -1423,9 +1423,8 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
1423 phys_addr_t paddr, size_t size, int flags) 1423 phys_addr_t paddr, size_t size, int flags)
1424{ 1424{
1425 struct arm_smmu_domain *smmu_domain = domain->priv; 1425 struct arm_smmu_domain *smmu_domain = domain->priv;
1426 struct arm_smmu_device *smmu = smmu_domain->leaf_smmu;
1427 1426
1428 if (!smmu_domain || !smmu) 1427 if (!smmu_domain)
1429 return -ENODEV; 1428 return -ENODEV;
1430 1429
1431 /* Check for silent address truncation up the SMMU chain. */ 1430 /* Check for silent address truncation up the SMMU chain. */
@@ -1449,44 +1448,34 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1449static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, 1448static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
1450 dma_addr_t iova) 1449 dma_addr_t iova)
1451{ 1450{
1452 pgd_t *pgd; 1451 pgd_t *pgdp, pgd;
1453 pud_t *pud; 1452 pud_t pud;
1454 pmd_t *pmd; 1453 pmd_t pmd;
1455 pte_t *pte; 1454 pte_t pte;
1456 struct arm_smmu_domain *smmu_domain = domain->priv; 1455 struct arm_smmu_domain *smmu_domain = domain->priv;
1457 struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; 1456 struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
1458 struct arm_smmu_device *smmu = root_cfg->smmu;
1459 1457
1460 spin_lock(&smmu_domain->lock); 1458 pgdp = root_cfg->pgd;
1461 pgd = root_cfg->pgd; 1459 if (!pgdp)
1462 if (!pgd) 1460 return 0;
1463 goto err_unlock;
1464 1461
1465 pgd += pgd_index(iova); 1462 pgd = *(pgdp + pgd_index(iova));
1466 if (pgd_none_or_clear_bad(pgd)) 1463 if (pgd_none(pgd))
1467 goto err_unlock; 1464 return 0;
1468 1465
1469 pud = pud_offset(pgd, iova); 1466 pud = *pud_offset(&pgd, iova);
1470 if (pud_none_or_clear_bad(pud)) 1467 if (pud_none(pud))
1471 goto err_unlock; 1468 return 0;
1472 1469
1473 pmd = pmd_offset(pud, iova); 1470 pmd = *pmd_offset(&pud, iova);
1474 if (pmd_none_or_clear_bad(pmd)) 1471 if (pmd_none(pmd))
1475 goto err_unlock; 1472 return 0;
1476 1473
1477 pte = pmd_page_vaddr(*pmd) + pte_index(iova); 1474 pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
1478 if (pte_none(pte)) 1475 if (pte_none(pte))
1479 goto err_unlock; 1476 return 0;
1480
1481 spin_unlock(&smmu_domain->lock);
1482 return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK);
1483 1477
1484err_unlock: 1478 return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
1485 spin_unlock(&smmu_domain->lock);
1486 dev_warn(smmu->dev,
1487 "invalid (corrupt?) page tables detected for iova 0x%llx\n",
1488 (unsigned long long)iova);
1489 return -EINVAL;
1490} 1479}
1491 1480
1492static int arm_smmu_domain_has_cap(struct iommu_domain *domain, 1481static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
@@ -1863,6 +1852,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1863 dev_err(dev, 1852 dev_err(dev,
1864 "found only %d context interrupt(s) but %d required\n", 1853 "found only %d context interrupt(s) but %d required\n",
1865 smmu->num_context_irqs, smmu->num_context_banks); 1854 smmu->num_context_irqs, smmu->num_context_banks);
1855 err = -ENODEV;
1866 goto out_put_parent; 1856 goto out_put_parent;
1867 } 1857 }
1868 1858
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 9031171c141b..341c6016812d 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -957,12 +957,13 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
957 if (WARN_ON(!gic->domain)) 957 if (WARN_ON(!gic->domain))
958 return; 958 return;
959 959
960 if (gic_nr == 0) {
960#ifdef CONFIG_SMP 961#ifdef CONFIG_SMP
961 set_smp_cross_call(gic_raise_softirq); 962 set_smp_cross_call(gic_raise_softirq);
962 register_cpu_notifier(&gic_cpu_notifier); 963 register_cpu_notifier(&gic_cpu_notifier);
963#endif 964#endif
964 965 set_handle_irq(gic_handle_irq);
965 set_handle_irq(gic_handle_irq); 966 }
966 967
967 gic_chip.flags |= gic_arch_extn.flags; 968 gic_chip.flags |= gic_arch_extn.flags;
968 gic_dist_init(gic); 969 gic_dist_init(gic);
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index baf2686aa8eb..02125e6a9109 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -1083,8 +1083,10 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
1083 spin_unlock_irqrestore(&card->isdnloop_lock, flags); 1083 spin_unlock_irqrestore(&card->isdnloop_lock, flags);
1084 return -ENOMEM; 1084 return -ENOMEM;
1085 } 1085 }
1086 for (i = 0; i < 3; i++) 1086 for (i = 0; i < 3; i++) {
1087 strcpy(card->s0num[i], sdef.num[i]); 1087 strlcpy(card->s0num[i], sdef.num[i],
1088 sizeof(card->s0num[0]));
1089 }
1088 break; 1090 break;
1089 case ISDN_PTYPE_1TR6: 1091 case ISDN_PTYPE_1TR6:
1090 if (isdnloop_fake(card, "DRV1.04TC-1TR6-CAPI-CNS-BASIS-29.11.95", 1092 if (isdnloop_fake(card, "DRV1.04TC-1TR6-CAPI-CNS-BASIS-29.11.95",
@@ -1097,7 +1099,7 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
1097 spin_unlock_irqrestore(&card->isdnloop_lock, flags); 1099 spin_unlock_irqrestore(&card->isdnloop_lock, flags);
1098 return -ENOMEM; 1100 return -ENOMEM;
1099 } 1101 }
1100 strcpy(card->s0num[0], sdef.num[0]); 1102 strlcpy(card->s0num[0], sdef.num[0], sizeof(card->s0num[0]));
1101 card->s0num[1][0] = '\0'; 1103 card->s0num[1][0] = '\0';
1102 card->s0num[2][0] = '\0'; 1104 card->s0num[2][0] = '\0';
1103 break; 1105 break;
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index e47dcb9d1e91..5cefb479c707 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -117,7 +117,6 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
117{ 117{
118 struct sk_buff *skb; 118 struct sk_buff *skb;
119 struct sock *sk = sock->sk; 119 struct sock *sk = sock->sk;
120 struct sockaddr_mISDN *maddr;
121 120
122 int copied, err; 121 int copied, err;
123 122
@@ -135,9 +134,9 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
135 if (!skb) 134 if (!skb)
136 return err; 135 return err;
137 136
138 if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) { 137 if (msg->msg_name) {
139 msg->msg_namelen = sizeof(struct sockaddr_mISDN); 138 struct sockaddr_mISDN *maddr = msg->msg_name;
140 maddr = (struct sockaddr_mISDN *)msg->msg_name; 139
141 maddr->family = AF_ISDN; 140 maddr->family = AF_ISDN;
142 maddr->dev = _pms(sk)->dev->id; 141 maddr->dev = _pms(sk)->dev->id;
143 if ((sk->sk_protocol == ISDN_P_LAPD_TE) || 142 if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
@@ -150,11 +149,7 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
150 maddr->sapi = _pms(sk)->ch.addr & 0xFF; 149 maddr->sapi = _pms(sk)->ch.addr & 0xFF;
151 maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF; 150 maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF;
152 } 151 }
153 } else { 152 msg->msg_namelen = sizeof(*maddr);
154 if (msg->msg_namelen)
155 printk(KERN_WARNING "%s: too small namelen %d\n",
156 __func__, msg->msg_namelen);
157 msg->msg_namelen = 0;
158 } 153 }
159 154
160 copied = skb->len + MISDN_HEADER_LEN; 155 copied = skb->len + MISDN_HEADER_LEN;
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 2848171b8576..b31d8e99c419 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -82,22 +82,12 @@ static inline size_t sizeof_pwm_leds_priv(int num_leds)
82 (sizeof(struct led_pwm_data) * num_leds); 82 (sizeof(struct led_pwm_data) * num_leds);
83} 83}
84 84
85static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev) 85static int led_pwm_create_of(struct platform_device *pdev,
86 struct led_pwm_priv *priv)
86{ 87{
87 struct device_node *node = pdev->dev.of_node; 88 struct device_node *node = pdev->dev.of_node;
88 struct device_node *child; 89 struct device_node *child;
89 struct led_pwm_priv *priv; 90 int ret;
90 int count, ret;
91
92 /* count LEDs in this device, so we know how much to allocate */
93 count = of_get_child_count(node);
94 if (!count)
95 return NULL;
96
97 priv = devm_kzalloc(&pdev->dev, sizeof_pwm_leds_priv(count),
98 GFP_KERNEL);
99 if (!priv)
100 return NULL;
101 91
102 for_each_child_of_node(node, child) { 92 for_each_child_of_node(node, child) {
103 struct led_pwm_data *led_dat = &priv->leds[priv->num_leds]; 93 struct led_pwm_data *led_dat = &priv->leds[priv->num_leds];
@@ -109,6 +99,7 @@ static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev)
109 if (IS_ERR(led_dat->pwm)) { 99 if (IS_ERR(led_dat->pwm)) {
110 dev_err(&pdev->dev, "unable to request PWM for %s\n", 100 dev_err(&pdev->dev, "unable to request PWM for %s\n",
111 led_dat->cdev.name); 101 led_dat->cdev.name);
102 ret = PTR_ERR(led_dat->pwm);
112 goto err; 103 goto err;
113 } 104 }
114 /* Get the period from PWM core when n*/ 105 /* Get the period from PWM core when n*/
@@ -137,28 +128,36 @@ static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev)
137 priv->num_leds++; 128 priv->num_leds++;
138 } 129 }
139 130
140 return priv; 131 return 0;
141err: 132err:
142 while (priv->num_leds--) 133 while (priv->num_leds--)
143 led_classdev_unregister(&priv->leds[priv->num_leds].cdev); 134 led_classdev_unregister(&priv->leds[priv->num_leds].cdev);
144 135
145 return NULL; 136 return ret;
146} 137}
147 138
148static int led_pwm_probe(struct platform_device *pdev) 139static int led_pwm_probe(struct platform_device *pdev)
149{ 140{
150 struct led_pwm_platform_data *pdata = dev_get_platdata(&pdev->dev); 141 struct led_pwm_platform_data *pdata = dev_get_platdata(&pdev->dev);
151 struct led_pwm_priv *priv; 142 struct led_pwm_priv *priv;
152 int i, ret = 0; 143 int count, i;
144 int ret = 0;
145
146 if (pdata)
147 count = pdata->num_leds;
148 else
149 count = of_get_child_count(pdev->dev.of_node);
150
151 if (!count)
152 return -EINVAL;
153 153
154 if (pdata && pdata->num_leds) { 154 priv = devm_kzalloc(&pdev->dev, sizeof_pwm_leds_priv(count),
155 priv = devm_kzalloc(&pdev->dev, 155 GFP_KERNEL);
156 sizeof_pwm_leds_priv(pdata->num_leds), 156 if (!priv)
157 GFP_KERNEL); 157 return -ENOMEM;
158 if (!priv)
159 return -ENOMEM;
160 158
161 for (i = 0; i < pdata->num_leds; i++) { 159 if (pdata) {
160 for (i = 0; i < count; i++) {
162 struct led_pwm *cur_led = &pdata->leds[i]; 161 struct led_pwm *cur_led = &pdata->leds[i];
163 struct led_pwm_data *led_dat = &priv->leds[i]; 162 struct led_pwm_data *led_dat = &priv->leds[i];
164 163
@@ -188,11 +187,11 @@ static int led_pwm_probe(struct platform_device *pdev)
188 if (ret < 0) 187 if (ret < 0)
189 goto err; 188 goto err;
190 } 189 }
191 priv->num_leds = pdata->num_leds; 190 priv->num_leds = count;
192 } else { 191 } else {
193 priv = led_pwm_create_of(pdev); 192 ret = led_pwm_create_of(pdev, priv);
194 if (!priv) 193 if (ret)
195 return -ENODEV; 194 return ret;
196 } 195 }
197 196
198 platform_set_drvdata(pdev, priv); 197 platform_set_drvdata(pdev, priv);
diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
index 6753b65f8ede..d2f0120bc878 100644
--- a/drivers/macintosh/Makefile
+++ b/drivers/macintosh/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_WINDFARM_RM31) += windfarm_fcu_controls.o \
40 windfarm_ad7417_sensor.o \ 40 windfarm_ad7417_sensor.o \
41 windfarm_lm75_sensor.o \ 41 windfarm_lm75_sensor.o \
42 windfarm_lm87_sensor.o \ 42 windfarm_lm87_sensor.o \
43 windfarm_max6690_sensor.o \
43 windfarm_pid.o \ 44 windfarm_pid.o \
44 windfarm_cpufreq_clamp.o \ 45 windfarm_cpufreq_clamp.o \
45 windfarm_rm31.o 46 windfarm_rm31.o
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 173cbb20d104..54bdd923316f 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1717,6 +1717,11 @@ static int __init dm_bufio_init(void)
1717{ 1717{
1718 __u64 mem; 1718 __u64 mem;
1719 1719
1720 dm_bufio_allocated_kmem_cache = 0;
1721 dm_bufio_allocated_get_free_pages = 0;
1722 dm_bufio_allocated_vmalloc = 0;
1723 dm_bufio_current_allocated = 0;
1724
1720 memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches); 1725 memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
1721 memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names); 1726 memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
1722 1727
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 416b7b752a6e..64780ad73bb0 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -730,15 +730,18 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
730 int r = 0; 730 int r = 0;
731 bool updated = updated_this_tick(mq, e); 731 bool updated = updated_this_tick(mq, e);
732 732
733 requeue_and_update_tick(mq, e);
734
735 if ((!discarded_oblock && updated) || 733 if ((!discarded_oblock && updated) ||
736 !should_promote(mq, e, discarded_oblock, data_dir)) 734 !should_promote(mq, e, discarded_oblock, data_dir)) {
735 requeue_and_update_tick(mq, e);
737 result->op = POLICY_MISS; 736 result->op = POLICY_MISS;
738 else if (!can_migrate) 737
738 } else if (!can_migrate)
739 r = -EWOULDBLOCK; 739 r = -EWOULDBLOCK;
740 else 740
741 else {
742 requeue_and_update_tick(mq, e);
741 r = pre_cache_to_cache(mq, e, result); 743 r = pre_cache_to_cache(mq, e, result);
744 }
742 745
743 return r; 746 return r;
744} 747}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9efcf1059b99..1b1469ebe5cb 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2755,7 +2755,7 @@ static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
2755{ 2755{
2756 int r; 2756 int r;
2757 2757
2758 r = dm_cache_resize(cache->cmd, cache->cache_size); 2758 r = dm_cache_resize(cache->cmd, new_size);
2759 if (r) { 2759 if (r) {
2760 DMERR("could not resize cache metadata"); 2760 DMERR("could not resize cache metadata");
2761 return r; 2761 return r;
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 496d5f3646a5..2f91d6d4a2cc 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -20,6 +20,7 @@
20struct delay_c { 20struct delay_c {
21 struct timer_list delay_timer; 21 struct timer_list delay_timer;
22 struct mutex timer_lock; 22 struct mutex timer_lock;
23 struct workqueue_struct *kdelayd_wq;
23 struct work_struct flush_expired_bios; 24 struct work_struct flush_expired_bios;
24 struct list_head delayed_bios; 25 struct list_head delayed_bios;
25 atomic_t may_delay; 26 atomic_t may_delay;
@@ -45,14 +46,13 @@ struct dm_delay_info {
45 46
46static DEFINE_MUTEX(delayed_bios_lock); 47static DEFINE_MUTEX(delayed_bios_lock);
47 48
48static struct workqueue_struct *kdelayd_wq;
49static struct kmem_cache *delayed_cache; 49static struct kmem_cache *delayed_cache;
50 50
51static void handle_delayed_timer(unsigned long data) 51static void handle_delayed_timer(unsigned long data)
52{ 52{
53 struct delay_c *dc = (struct delay_c *)data; 53 struct delay_c *dc = (struct delay_c *)data;
54 54
55 queue_work(kdelayd_wq, &dc->flush_expired_bios); 55 queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
56} 56}
57 57
58static void queue_timeout(struct delay_c *dc, unsigned long expires) 58static void queue_timeout(struct delay_c *dc, unsigned long expires)
@@ -191,6 +191,12 @@ out:
191 goto bad_dev_write; 191 goto bad_dev_write;
192 } 192 }
193 193
194 dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
195 if (!dc->kdelayd_wq) {
196 DMERR("Couldn't start kdelayd");
197 goto bad_queue;
198 }
199
194 setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc); 200 setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);
195 201
196 INIT_WORK(&dc->flush_expired_bios, flush_expired_bios); 202 INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
@@ -203,6 +209,8 @@ out:
203 ti->private = dc; 209 ti->private = dc;
204 return 0; 210 return 0;
205 211
212bad_queue:
213 mempool_destroy(dc->delayed_pool);
206bad_dev_write: 214bad_dev_write:
207 if (dc->dev_write) 215 if (dc->dev_write)
208 dm_put_device(ti, dc->dev_write); 216 dm_put_device(ti, dc->dev_write);
@@ -217,7 +225,7 @@ static void delay_dtr(struct dm_target *ti)
217{ 225{
218 struct delay_c *dc = ti->private; 226 struct delay_c *dc = ti->private;
219 227
220 flush_workqueue(kdelayd_wq); 228 destroy_workqueue(dc->kdelayd_wq);
221 229
222 dm_put_device(ti, dc->dev_read); 230 dm_put_device(ti, dc->dev_read);
223 231
@@ -350,12 +358,6 @@ static int __init dm_delay_init(void)
350{ 358{
351 int r = -ENOMEM; 359 int r = -ENOMEM;
352 360
353 kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
354 if (!kdelayd_wq) {
355 DMERR("Couldn't start kdelayd");
356 goto bad_queue;
357 }
358
359 delayed_cache = KMEM_CACHE(dm_delay_info, 0); 361 delayed_cache = KMEM_CACHE(dm_delay_info, 0);
360 if (!delayed_cache) { 362 if (!delayed_cache) {
361 DMERR("Couldn't create delayed bio cache."); 363 DMERR("Couldn't create delayed bio cache.");
@@ -373,8 +375,6 @@ static int __init dm_delay_init(void)
373bad_register: 375bad_register:
374 kmem_cache_destroy(delayed_cache); 376 kmem_cache_destroy(delayed_cache);
375bad_memcache: 377bad_memcache:
376 destroy_workqueue(kdelayd_wq);
377bad_queue:
378 return r; 378 return r;
379} 379}
380 380
@@ -382,7 +382,6 @@ static void __exit dm_delay_exit(void)
382{ 382{
383 dm_unregister_target(&delay_target); 383 dm_unregister_target(&delay_target);
384 kmem_cache_destroy(delayed_cache); 384 kmem_cache_destroy(delayed_cache);
385 destroy_workqueue(kdelayd_wq);
386} 385}
387 386
388/* Module hooks */ 387/* Module hooks */
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index aec57d76db5d..944690bafd93 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -66,6 +66,18 @@ struct dm_snapshot {
66 66
67 atomic_t pending_exceptions_count; 67 atomic_t pending_exceptions_count;
68 68
69 /* Protected by "lock" */
70 sector_t exception_start_sequence;
71
72 /* Protected by kcopyd single-threaded callback */
73 sector_t exception_complete_sequence;
74
75 /*
76 * A list of pending exceptions that completed out of order.
77 * Protected by kcopyd single-threaded callback.
78 */
79 struct list_head out_of_order_list;
80
69 mempool_t *pending_pool; 81 mempool_t *pending_pool;
70 82
71 struct dm_exception_table pending; 83 struct dm_exception_table pending;
@@ -173,6 +185,14 @@ struct dm_snap_pending_exception {
173 */ 185 */
174 int started; 186 int started;
175 187
188 /* There was copying error. */
189 int copy_error;
190
191 /* A sequence number, it is used for in-order completion. */
192 sector_t exception_sequence;
193
194 struct list_head out_of_order_entry;
195
176 /* 196 /*
177 * For writing a complete chunk, bypassing the copy. 197 * For writing a complete chunk, bypassing the copy.
178 */ 198 */
@@ -1094,6 +1114,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1094 s->valid = 1; 1114 s->valid = 1;
1095 s->active = 0; 1115 s->active = 0;
1096 atomic_set(&s->pending_exceptions_count, 0); 1116 atomic_set(&s->pending_exceptions_count, 0);
1117 s->exception_start_sequence = 0;
1118 s->exception_complete_sequence = 0;
1119 INIT_LIST_HEAD(&s->out_of_order_list);
1097 init_rwsem(&s->lock); 1120 init_rwsem(&s->lock);
1098 INIT_LIST_HEAD(&s->list); 1121 INIT_LIST_HEAD(&s->list);
1099 spin_lock_init(&s->pe_lock); 1122 spin_lock_init(&s->pe_lock);
@@ -1443,6 +1466,19 @@ static void commit_callback(void *context, int success)
1443 pending_complete(pe, success); 1466 pending_complete(pe, success);
1444} 1467}
1445 1468
1469static void complete_exception(struct dm_snap_pending_exception *pe)
1470{
1471 struct dm_snapshot *s = pe->snap;
1472
1473 if (unlikely(pe->copy_error))
1474 pending_complete(pe, 0);
1475
1476 else
1477 /* Update the metadata if we are persistent */
1478 s->store->type->commit_exception(s->store, &pe->e,
1479 commit_callback, pe);
1480}
1481
1446/* 1482/*
1447 * Called when the copy I/O has finished. kcopyd actually runs 1483 * Called when the copy I/O has finished. kcopyd actually runs
1448 * this code so don't block. 1484 * this code so don't block.
@@ -1452,13 +1488,32 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
1452 struct dm_snap_pending_exception *pe = context; 1488 struct dm_snap_pending_exception *pe = context;
1453 struct dm_snapshot *s = pe->snap; 1489 struct dm_snapshot *s = pe->snap;
1454 1490
1455 if (read_err || write_err) 1491 pe->copy_error = read_err || write_err;
1456 pending_complete(pe, 0);
1457 1492
1458 else 1493 if (pe->exception_sequence == s->exception_complete_sequence) {
1459 /* Update the metadata if we are persistent */ 1494 s->exception_complete_sequence++;
1460 s->store->type->commit_exception(s->store, &pe->e, 1495 complete_exception(pe);
1461 commit_callback, pe); 1496
1497 while (!list_empty(&s->out_of_order_list)) {
1498 pe = list_entry(s->out_of_order_list.next,
1499 struct dm_snap_pending_exception, out_of_order_entry);
1500 if (pe->exception_sequence != s->exception_complete_sequence)
1501 break;
1502 s->exception_complete_sequence++;
1503 list_del(&pe->out_of_order_entry);
1504 complete_exception(pe);
1505 }
1506 } else {
1507 struct list_head *lh;
1508 struct dm_snap_pending_exception *pe2;
1509
1510 list_for_each_prev(lh, &s->out_of_order_list) {
1511 pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
1512 if (pe2->exception_sequence < pe->exception_sequence)
1513 break;
1514 }
1515 list_add(&pe->out_of_order_entry, lh);
1516 }
1462} 1517}
1463 1518
1464/* 1519/*
@@ -1553,6 +1608,8 @@ __find_pending_exception(struct dm_snapshot *s,
1553 return NULL; 1608 return NULL;
1554 } 1609 }
1555 1610
1611 pe->exception_sequence = s->exception_start_sequence++;
1612
1556 dm_insert_exception(&s->pending, &pe->e); 1613 dm_insert_exception(&s->pending, &pe->e);
1557 1614
1558 return pe; 1615 return pe;
@@ -2192,7 +2249,7 @@ static struct target_type origin_target = {
2192 2249
2193static struct target_type snapshot_target = { 2250static struct target_type snapshot_target = {
2194 .name = "snapshot", 2251 .name = "snapshot",
2195 .version = {1, 11, 1}, 2252 .version = {1, 12, 0},
2196 .module = THIS_MODULE, 2253 .module = THIS_MODULE,
2197 .ctr = snapshot_ctr, 2254 .ctr = snapshot_ctr,
2198 .dtr = snapshot_dtr, 2255 .dtr = snapshot_dtr,
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 3d404c1371ed..28a90122a5a8 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -964,6 +964,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
964 964
965int __init dm_statistics_init(void) 965int __init dm_statistics_init(void)
966{ 966{
967 shared_memory_amount = 0;
967 dm_stat_need_rcu_barrier = 0; 968 dm_stat_need_rcu_barrier = 0;
968 return 0; 969 return 0;
969} 970}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 465f08ca62b1..3ba6a3859ce3 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -200,6 +200,11 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
200 200
201 num_targets = dm_round_up(num_targets, KEYS_PER_NODE); 201 num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
202 202
203 if (!num_targets) {
204 kfree(t);
205 return -ENOMEM;
206 }
207
203 if (alloc_targets(t, num_targets)) { 208 if (alloc_targets(t, num_targets)) {
204 kfree(t); 209 kfree(t);
205 return -ENOMEM; 210 return -ENOMEM;
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 60bce435f4fa..8a30ad54bd46 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1697,6 +1697,14 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
1697 up_write(&pmd->root_lock); 1697 up_write(&pmd->root_lock);
1698} 1698}
1699 1699
1700void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
1701{
1702 down_write(&pmd->root_lock);
1703 pmd->read_only = false;
1704 dm_bm_set_read_write(pmd->bm);
1705 up_write(&pmd->root_lock);
1706}
1707
1700int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, 1708int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
1701 dm_block_t threshold, 1709 dm_block_t threshold,
1702 dm_sm_threshold_fn fn, 1710 dm_sm_threshold_fn fn,
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 845ebbe589a9..7bcc0e1d6238 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -193,6 +193,7 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_siz
193 * that nothing is changing. 193 * that nothing is changing.
194 */ 194 */
195void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd); 195void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd);
196void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd);
196 197
197int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, 198int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
198 dm_block_t threshold, 199 dm_block_t threshold,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2c0cf511ec23..ee29037ffc2e 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -640,7 +640,9 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
640 */ 640 */
641 r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block); 641 r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
642 if (r) { 642 if (r) {
643 DMERR_LIMIT("dm_thin_insert_block() failed"); 643 DMERR_LIMIT("%s: dm_thin_insert_block() failed: error = %d",
644 dm_device_name(pool->pool_md), r);
645 set_pool_mode(pool, PM_READ_ONLY);
644 cell_error(pool, m->cell); 646 cell_error(pool, m->cell);
645 goto out; 647 goto out;
646 } 648 }
@@ -881,32 +883,23 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
881 } 883 }
882} 884}
883 885
884static int commit(struct pool *pool)
885{
886 int r;
887
888 r = dm_pool_commit_metadata(pool->pmd);
889 if (r)
890 DMERR_LIMIT("%s: commit failed: error = %d",
891 dm_device_name(pool->pool_md), r);
892
893 return r;
894}
895
896/* 886/*
897 * A non-zero return indicates read_only or fail_io mode. 887 * A non-zero return indicates read_only or fail_io mode.
898 * Many callers don't care about the return value. 888 * Many callers don't care about the return value.
899 */ 889 */
900static int commit_or_fallback(struct pool *pool) 890static int commit(struct pool *pool)
901{ 891{
902 int r; 892 int r;
903 893
904 if (get_pool_mode(pool) != PM_WRITE) 894 if (get_pool_mode(pool) != PM_WRITE)
905 return -EINVAL; 895 return -EINVAL;
906 896
907 r = commit(pool); 897 r = dm_pool_commit_metadata(pool->pmd);
908 if (r) 898 if (r) {
899 DMERR_LIMIT("%s: dm_pool_commit_metadata failed: error = %d",
900 dm_device_name(pool->pool_md), r);
909 set_pool_mode(pool, PM_READ_ONLY); 901 set_pool_mode(pool, PM_READ_ONLY);
902 }
910 903
911 return r; 904 return r;
912} 905}
@@ -943,7 +936,9 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
943 * Try to commit to see if that will free up some 936 * Try to commit to see if that will free up some
944 * more space. 937 * more space.
945 */ 938 */
946 (void) commit_or_fallback(pool); 939 r = commit(pool);
940 if (r)
941 return r;
947 942
948 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); 943 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
949 if (r) 944 if (r)
@@ -957,7 +952,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
957 * table reload). 952 * table reload).
958 */ 953 */
959 if (!free_blocks) { 954 if (!free_blocks) {
960 DMWARN("%s: no free space available.", 955 DMWARN("%s: no free data space available.",
961 dm_device_name(pool->pool_md)); 956 dm_device_name(pool->pool_md));
962 spin_lock_irqsave(&pool->lock, flags); 957 spin_lock_irqsave(&pool->lock, flags);
963 pool->no_free_space = 1; 958 pool->no_free_space = 1;
@@ -967,8 +962,16 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
967 } 962 }
968 963
969 r = dm_pool_alloc_data_block(pool->pmd, result); 964 r = dm_pool_alloc_data_block(pool->pmd, result);
970 if (r) 965 if (r) {
966 if (r == -ENOSPC &&
967 !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) &&
968 !free_blocks) {
969 DMWARN("%s: no free metadata space available.",
970 dm_device_name(pool->pool_md));
971 set_pool_mode(pool, PM_READ_ONLY);
972 }
971 return r; 973 return r;
974 }
972 975
973 return 0; 976 return 0;
974} 977}
@@ -1349,7 +1352,7 @@ static void process_deferred_bios(struct pool *pool)
1349 if (bio_list_empty(&bios) && !need_commit_due_to_time(pool)) 1352 if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
1350 return; 1353 return;
1351 1354
1352 if (commit_or_fallback(pool)) { 1355 if (commit(pool)) {
1353 while ((bio = bio_list_pop(&bios))) 1356 while ((bio = bio_list_pop(&bios)))
1354 bio_io_error(bio); 1357 bio_io_error(bio);
1355 return; 1358 return;
@@ -1397,6 +1400,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
1397 case PM_FAIL: 1400 case PM_FAIL:
1398 DMERR("%s: switching pool to failure mode", 1401 DMERR("%s: switching pool to failure mode",
1399 dm_device_name(pool->pool_md)); 1402 dm_device_name(pool->pool_md));
1403 dm_pool_metadata_read_only(pool->pmd);
1400 pool->process_bio = process_bio_fail; 1404 pool->process_bio = process_bio_fail;
1401 pool->process_discard = process_bio_fail; 1405 pool->process_discard = process_bio_fail;
1402 pool->process_prepared_mapping = process_prepared_mapping_fail; 1406 pool->process_prepared_mapping = process_prepared_mapping_fail;
@@ -1421,6 +1425,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
1421 break; 1425 break;
1422 1426
1423 case PM_WRITE: 1427 case PM_WRITE:
1428 dm_pool_metadata_read_write(pool->pmd);
1424 pool->process_bio = process_bio; 1429 pool->process_bio = process_bio;
1425 pool->process_discard = process_discard; 1430 pool->process_discard = process_discard;
1426 pool->process_prepared_mapping = process_prepared_mapping; 1431 pool->process_prepared_mapping = process_prepared_mapping;
@@ -1637,12 +1642,19 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
1637 struct pool_c *pt = ti->private; 1642 struct pool_c *pt = ti->private;
1638 1643
1639 /* 1644 /*
1640 * We want to make sure that degraded pools are never upgraded. 1645 * We want to make sure that a pool in PM_FAIL mode is never upgraded.
1641 */ 1646 */
1642 enum pool_mode old_mode = pool->pf.mode; 1647 enum pool_mode old_mode = pool->pf.mode;
1643 enum pool_mode new_mode = pt->adjusted_pf.mode; 1648 enum pool_mode new_mode = pt->adjusted_pf.mode;
1644 1649
1645 if (old_mode > new_mode) 1650 /*
1651 * If we were in PM_FAIL mode, rollback of metadata failed. We're
1652 * not going to recover without a thin_repair. So we never let the
1653 * pool move out of the old mode. On the other hand a PM_READ_ONLY
1654 * may have been due to a lack of metadata or data space, and may
1655 * now work (ie. if the underlying devices have been resized).
1656 */
1657 if (old_mode == PM_FAIL)
1646 new_mode = old_mode; 1658 new_mode = old_mode;
1647 1659
1648 pool->ti = ti; 1660 pool->ti = ti;
@@ -2266,7 +2278,7 @@ static int pool_preresume(struct dm_target *ti)
2266 return r; 2278 return r;
2267 2279
2268 if (need_commit1 || need_commit2) 2280 if (need_commit1 || need_commit2)
2269 (void) commit_or_fallback(pool); 2281 (void) commit(pool);
2270 2282
2271 return 0; 2283 return 0;
2272} 2284}
@@ -2293,7 +2305,7 @@ static void pool_postsuspend(struct dm_target *ti)
2293 2305
2294 cancel_delayed_work(&pool->waker); 2306 cancel_delayed_work(&pool->waker);
2295 flush_workqueue(pool->wq); 2307 flush_workqueue(pool->wq);
2296 (void) commit_or_fallback(pool); 2308 (void) commit(pool);
2297} 2309}
2298 2310
2299static int check_arg_count(unsigned argc, unsigned args_required) 2311static int check_arg_count(unsigned argc, unsigned args_required)
@@ -2427,7 +2439,7 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
2427 if (r) 2439 if (r)
2428 return r; 2440 return r;
2429 2441
2430 (void) commit_or_fallback(pool); 2442 (void) commit(pool);
2431 2443
2432 r = dm_pool_reserve_metadata_snap(pool->pmd); 2444 r = dm_pool_reserve_metadata_snap(pool->pmd);
2433 if (r) 2445 if (r)
@@ -2489,7 +2501,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2489 DMWARN("Unrecognised thin pool target message received: %s", argv[0]); 2501 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2490 2502
2491 if (!r) 2503 if (!r)
2492 (void) commit_or_fallback(pool); 2504 (void) commit(pool);
2493 2505
2494 return r; 2506 return r;
2495} 2507}
@@ -2544,7 +2556,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
2544 2556
2545 /* Commit to ensure statistics aren't out-of-date */ 2557 /* Commit to ensure statistics aren't out-of-date */
2546 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) 2558 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
2547 (void) commit_or_fallback(pool); 2559 (void) commit(pool);
2548 2560
2549 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id); 2561 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
2550 if (r) { 2562 if (r) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8766eabb0014..21f4d7ff0da2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -112,7 +112,7 @@ static inline int speed_max(struct mddev *mddev)
112 112
113static struct ctl_table_header *raid_table_header; 113static struct ctl_table_header *raid_table_header;
114 114
115static ctl_table raid_table[] = { 115static struct ctl_table raid_table[] = {
116 { 116 {
117 .procname = "speed_limit_min", 117 .procname = "speed_limit_min",
118 .data = &sysctl_speed_limit_min, 118 .data = &sysctl_speed_limit_min,
@@ -130,7 +130,7 @@ static ctl_table raid_table[] = {
130 { } 130 { }
131}; 131};
132 132
133static ctl_table raid_dir_table[] = { 133static struct ctl_table raid_dir_table[] = {
134 { 134 {
135 .procname = "raid", 135 .procname = "raid",
136 .maxlen = 0, 136 .maxlen = 0,
@@ -140,7 +140,7 @@ static ctl_table raid_dir_table[] = {
140 { } 140 { }
141}; 141};
142 142
143static ctl_table raid_root_table[] = { 143static struct ctl_table raid_root_table[] = {
144 { 144 {
145 .procname = "dev", 145 .procname = "dev",
146 .maxlen = 0, 146 .maxlen = 0,
@@ -562,11 +562,19 @@ static struct mddev * mddev_find(dev_t unit)
562 goto retry; 562 goto retry;
563} 563}
564 564
565static inline int mddev_lock(struct mddev * mddev) 565static inline int __must_check mddev_lock(struct mddev * mddev)
566{ 566{
567 return mutex_lock_interruptible(&mddev->reconfig_mutex); 567 return mutex_lock_interruptible(&mddev->reconfig_mutex);
568} 568}
569 569
570/* Sometimes we need to take the lock in a situation where
571 * failure due to interrupts is not acceptable.
572 */
573static inline void mddev_lock_nointr(struct mddev * mddev)
574{
575 mutex_lock(&mddev->reconfig_mutex);
576}
577
570static inline int mddev_is_locked(struct mddev *mddev) 578static inline int mddev_is_locked(struct mddev *mddev)
571{ 579{
572 return mutex_is_locked(&mddev->reconfig_mutex); 580 return mutex_is_locked(&mddev->reconfig_mutex);
@@ -768,16 +776,10 @@ void md_super_wait(struct mddev *mddev)
768 finish_wait(&mddev->sb_wait, &wq); 776 finish_wait(&mddev->sb_wait, &wq);
769} 777}
770 778
771static void bi_complete(struct bio *bio, int error)
772{
773 complete((struct completion*)bio->bi_private);
774}
775
776int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, 779int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
777 struct page *page, int rw, bool metadata_op) 780 struct page *page, int rw, bool metadata_op)
778{ 781{
779 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); 782 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
780 struct completion event;
781 int ret; 783 int ret;
782 784
783 rw |= REQ_SYNC; 785 rw |= REQ_SYNC;
@@ -793,11 +795,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
793 else 795 else
794 bio->bi_sector = sector + rdev->data_offset; 796 bio->bi_sector = sector + rdev->data_offset;
795 bio_add_page(bio, page, size, 0); 797 bio_add_page(bio, page, size, 0);
796 init_completion(&event); 798 submit_bio_wait(rw, bio);
797 bio->bi_private = &event;
798 bio->bi_end_io = bi_complete;
799 submit_bio(rw, bio);
800 wait_for_completion(&event);
801 799
802 ret = test_bit(BIO_UPTODATE, &bio->bi_flags); 800 ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
803 bio_put(bio); 801 bio_put(bio);
@@ -2978,7 +2976,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
2978 for_each_mddev(mddev, tmp) { 2976 for_each_mddev(mddev, tmp) {
2979 struct md_rdev *rdev2; 2977 struct md_rdev *rdev2;
2980 2978
2981 mddev_lock(mddev); 2979 mddev_lock_nointr(mddev);
2982 rdev_for_each(rdev2, mddev) 2980 rdev_for_each(rdev2, mddev)
2983 if (rdev->bdev == rdev2->bdev && 2981 if (rdev->bdev == rdev2->bdev &&
2984 rdev != rdev2 && 2982 rdev != rdev2 &&
@@ -2994,7 +2992,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
2994 break; 2992 break;
2995 } 2993 }
2996 } 2994 }
2997 mddev_lock(my_mddev); 2995 mddev_lock_nointr(my_mddev);
2998 if (overlap) { 2996 if (overlap) {
2999 /* Someone else could have slipped in a size 2997 /* Someone else could have slipped in a size
3000 * change here, but doing so is just silly. 2998 * change here, but doing so is just silly.
@@ -3580,6 +3578,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
3580 mddev->in_sync = 1; 3578 mddev->in_sync = 1;
3581 del_timer_sync(&mddev->safemode_timer); 3579 del_timer_sync(&mddev->safemode_timer);
3582 } 3580 }
3581 blk_set_stacking_limits(&mddev->queue->limits);
3583 pers->run(mddev); 3582 pers->run(mddev);
3584 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3583 set_bit(MD_CHANGE_DEVS, &mddev->flags);
3585 mddev_resume(mddev); 3584 mddev_resume(mddev);
@@ -5258,7 +5257,7 @@ static void __md_stop_writes(struct mddev *mddev)
5258 5257
5259void md_stop_writes(struct mddev *mddev) 5258void md_stop_writes(struct mddev *mddev)
5260{ 5259{
5261 mddev_lock(mddev); 5260 mddev_lock_nointr(mddev);
5262 __md_stop_writes(mddev); 5261 __md_stop_writes(mddev);
5263 mddev_unlock(mddev); 5262 mddev_unlock(mddev);
5264} 5263}
@@ -5291,20 +5290,35 @@ EXPORT_SYMBOL_GPL(md_stop);
5291static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) 5290static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
5292{ 5291{
5293 int err = 0; 5292 int err = 0;
5293 int did_freeze = 0;
5294
5295 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5296 did_freeze = 1;
5297 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5298 md_wakeup_thread(mddev->thread);
5299 }
5300 if (mddev->sync_thread) {
5301 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5302 /* Thread might be blocked waiting for metadata update
5303 * which will now never happen */
5304 wake_up_process(mddev->sync_thread->tsk);
5305 }
5306 mddev_unlock(mddev);
5307 wait_event(resync_wait, mddev->sync_thread == NULL);
5308 mddev_lock_nointr(mddev);
5309
5294 mutex_lock(&mddev->open_mutex); 5310 mutex_lock(&mddev->open_mutex);
5295 if (atomic_read(&mddev->openers) > !!bdev) { 5311 if (atomic_read(&mddev->openers) > !!bdev ||
5312 mddev->sync_thread ||
5313 (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
5296 printk("md: %s still in use.\n",mdname(mddev)); 5314 printk("md: %s still in use.\n",mdname(mddev));
5315 if (did_freeze) {
5316 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5317 md_wakeup_thread(mddev->thread);
5318 }
5297 err = -EBUSY; 5319 err = -EBUSY;
5298 goto out; 5320 goto out;
5299 } 5321 }
5300 if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
5301 /* Someone opened the device since we flushed it
5302 * so page cache could be dirty and it is too late
5303 * to flush. So abort
5304 */
5305 mutex_unlock(&mddev->open_mutex);
5306 return -EBUSY;
5307 }
5308 if (mddev->pers) { 5322 if (mddev->pers) {
5309 __md_stop_writes(mddev); 5323 __md_stop_writes(mddev);
5310 5324
@@ -5315,7 +5329,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
5315 set_disk_ro(mddev->gendisk, 1); 5329 set_disk_ro(mddev->gendisk, 1);
5316 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5330 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5317 sysfs_notify_dirent_safe(mddev->sysfs_state); 5331 sysfs_notify_dirent_safe(mddev->sysfs_state);
5318 err = 0; 5332 err = 0;
5319 } 5333 }
5320out: 5334out:
5321 mutex_unlock(&mddev->open_mutex); 5335 mutex_unlock(&mddev->open_mutex);
@@ -5331,20 +5345,34 @@ static int do_md_stop(struct mddev * mddev, int mode,
5331{ 5345{
5332 struct gendisk *disk = mddev->gendisk; 5346 struct gendisk *disk = mddev->gendisk;
5333 struct md_rdev *rdev; 5347 struct md_rdev *rdev;
5348 int did_freeze = 0;
5349
5350 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5351 did_freeze = 1;
5352 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5353 md_wakeup_thread(mddev->thread);
5354 }
5355 if (mddev->sync_thread) {
5356 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5357 /* Thread might be blocked waiting for metadata update
5358 * which will now never happen */
5359 wake_up_process(mddev->sync_thread->tsk);
5360 }
5361 mddev_unlock(mddev);
5362 wait_event(resync_wait, mddev->sync_thread == NULL);
5363 mddev_lock_nointr(mddev);
5334 5364
5335 mutex_lock(&mddev->open_mutex); 5365 mutex_lock(&mddev->open_mutex);
5336 if (atomic_read(&mddev->openers) > !!bdev || 5366 if (atomic_read(&mddev->openers) > !!bdev ||
5337 mddev->sysfs_active) { 5367 mddev->sysfs_active ||
5368 mddev->sync_thread ||
5369 (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
5338 printk("md: %s still in use.\n",mdname(mddev)); 5370 printk("md: %s still in use.\n",mdname(mddev));
5339 mutex_unlock(&mddev->open_mutex); 5371 mutex_unlock(&mddev->open_mutex);
5340 return -EBUSY; 5372 if (did_freeze) {
5341 } 5373 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5342 if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) { 5374 md_wakeup_thread(mddev->thread);
5343 /* Someone opened the device since we flushed it 5375 }
5344 * so page cache could be dirty and it is too late
5345 * to flush. So abort
5346 */
5347 mutex_unlock(&mddev->open_mutex);
5348 return -EBUSY; 5376 return -EBUSY;
5349 } 5377 }
5350 if (mddev->pers) { 5378 if (mddev->pers) {
@@ -6551,7 +6579,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
6551 wait_event(mddev->sb_wait, 6579 wait_event(mddev->sb_wait,
6552 !test_bit(MD_CHANGE_DEVS, &mddev->flags) && 6580 !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
6553 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 6581 !test_bit(MD_CHANGE_PENDING, &mddev->flags));
6554 mddev_lock(mddev); 6582 mddev_lock_nointr(mddev);
6555 } 6583 }
6556 } else { 6584 } else {
6557 err = -EROFS; 6585 err = -EROFS;
@@ -7361,9 +7389,6 @@ void md_do_sync(struct md_thread *thread)
7361 mddev->curr_resync = 2; 7389 mddev->curr_resync = 2;
7362 7390
7363 try_again: 7391 try_again:
7364 if (kthread_should_stop())
7365 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7366
7367 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7392 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7368 goto skip; 7393 goto skip;
7369 for_each_mddev(mddev2, tmp) { 7394 for_each_mddev(mddev2, tmp) {
@@ -7388,7 +7413,7 @@ void md_do_sync(struct md_thread *thread)
7388 * be caught by 'softlockup' 7413 * be caught by 'softlockup'
7389 */ 7414 */
7390 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); 7415 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
7391 if (!kthread_should_stop() && 7416 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7392 mddev2->curr_resync >= mddev->curr_resync) { 7417 mddev2->curr_resync >= mddev->curr_resync) {
7393 printk(KERN_INFO "md: delaying %s of %s" 7418 printk(KERN_INFO "md: delaying %s of %s"
7394 " until %s has finished (they" 7419 " until %s has finished (they"
@@ -7464,7 +7489,7 @@ void md_do_sync(struct md_thread *thread)
7464 last_check = 0; 7489 last_check = 0;
7465 7490
7466 if (j>2) { 7491 if (j>2) {
7467 printk(KERN_INFO 7492 printk(KERN_INFO
7468 "md: resuming %s of %s from checkpoint.\n", 7493 "md: resuming %s of %s from checkpoint.\n",
7469 desc, mdname(mddev)); 7494 desc, mdname(mddev));
7470 mddev->curr_resync = j; 7495 mddev->curr_resync = j;
@@ -7501,7 +7526,8 @@ void md_do_sync(struct md_thread *thread)
7501 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 7526 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
7502 } 7527 }
7503 7528
7504 while (j >= mddev->resync_max && !kthread_should_stop()) { 7529 while (j >= mddev->resync_max &&
7530 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7505 /* As this condition is controlled by user-space, 7531 /* As this condition is controlled by user-space,
7506 * we can block indefinitely, so use '_interruptible' 7532 * we can block indefinitely, so use '_interruptible'
7507 * to avoid triggering warnings. 7533 * to avoid triggering warnings.
@@ -7509,17 +7535,18 @@ void md_do_sync(struct md_thread *thread)
7509 flush_signals(current); /* just in case */ 7535 flush_signals(current); /* just in case */
7510 wait_event_interruptible(mddev->recovery_wait, 7536 wait_event_interruptible(mddev->recovery_wait,
7511 mddev->resync_max > j 7537 mddev->resync_max > j
7512 || kthread_should_stop()); 7538 || test_bit(MD_RECOVERY_INTR,
7539 &mddev->recovery));
7513 } 7540 }
7514 7541
7515 if (kthread_should_stop()) 7542 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7516 goto interrupted; 7543 break;
7517 7544
7518 sectors = mddev->pers->sync_request(mddev, j, &skipped, 7545 sectors = mddev->pers->sync_request(mddev, j, &skipped,
7519 currspeed < speed_min(mddev)); 7546 currspeed < speed_min(mddev));
7520 if (sectors == 0) { 7547 if (sectors == 0) {
7521 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7548 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7522 goto out; 7549 break;
7523 } 7550 }
7524 7551
7525 if (!skipped) { /* actual IO requested */ 7552 if (!skipped) { /* actual IO requested */
@@ -7556,10 +7583,8 @@ void md_do_sync(struct md_thread *thread)
7556 last_mark = next; 7583 last_mark = next;
7557 } 7584 }
7558 7585
7559 7586 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7560 if (kthread_should_stop()) 7587 break;
7561 goto interrupted;
7562
7563 7588
7564 /* 7589 /*
7565 * this loop exits only if either when we are slower than 7590 * this loop exits only if either when we are slower than
@@ -7582,11 +7607,12 @@ void md_do_sync(struct md_thread *thread)
7582 } 7607 }
7583 } 7608 }
7584 } 7609 }
7585 printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc); 7610 printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
7611 test_bit(MD_RECOVERY_INTR, &mddev->recovery)
7612 ? "interrupted" : "done");
7586 /* 7613 /*
7587 * this also signals 'finished resyncing' to md_stop 7614 * this also signals 'finished resyncing' to md_stop
7588 */ 7615 */
7589 out:
7590 blk_finish_plug(&plug); 7616 blk_finish_plug(&plug);
7591 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 7617 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
7592 7618
@@ -7640,16 +7666,6 @@ void md_do_sync(struct md_thread *thread)
7640 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 7666 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
7641 md_wakeup_thread(mddev->thread); 7667 md_wakeup_thread(mddev->thread);
7642 return; 7668 return;
7643
7644 interrupted:
7645 /*
7646 * got a signal, exit.
7647 */
7648 printk(KERN_INFO
7649 "md: md_do_sync() got signal ... exiting\n");
7650 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7651 goto out;
7652
7653} 7669}
7654EXPORT_SYMBOL_GPL(md_do_sync); 7670EXPORT_SYMBOL_GPL(md_do_sync);
7655 7671
@@ -7751,7 +7767,7 @@ void md_check_recovery(struct mddev *mddev)
7751 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 7767 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
7752 return; 7768 return;
7753 if ( ! ( 7769 if ( ! (
7754 (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) || 7770 (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
7755 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 7771 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
7756 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 7772 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
7757 (mddev->external == 0 && mddev->safemode == 1) || 7773 (mddev->external == 0 && mddev->safemode == 1) ||
@@ -7894,6 +7910,7 @@ void md_reap_sync_thread(struct mddev *mddev)
7894 7910
7895 /* resync has finished, collect result */ 7911 /* resync has finished, collect result */
7896 md_unregister_thread(&mddev->sync_thread); 7912 md_unregister_thread(&mddev->sync_thread);
7913 wake_up(&resync_wait);
7897 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 7914 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7898 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 7915 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7899 /* success...*/ 7916 /* success...*/
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index af96e24ec328..1d75b1dc1e2e 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -317,8 +317,16 @@ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
317 * The shadow op will often be a noop. Only insert if it really 317 * The shadow op will often be a noop. Only insert if it really
318 * copied data. 318 * copied data.
319 */ 319 */
320 if (dm_block_location(*block) != b) 320 if (dm_block_location(*block) != b) {
321 /*
322 * dm_tm_shadow_block will have already decremented the old
323 * block, but it is still referenced by the btree. We
324 * increment to stop the insert decrementing it below zero
325 * when overwriting the old value.
326 */
327 dm_tm_inc(info->btree_info.tm, b);
321 r = insert_ablock(info, index, *block, root); 328 r = insert_ablock(info, index, *block, root);
329 }
322 330
323 return r; 331 return r;
324} 332}
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index a7e8bf296388..064a3c271baa 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -626,6 +626,12 @@ void dm_bm_set_read_only(struct dm_block_manager *bm)
626} 626}
627EXPORT_SYMBOL_GPL(dm_bm_set_read_only); 627EXPORT_SYMBOL_GPL(dm_bm_set_read_only);
628 628
629void dm_bm_set_read_write(struct dm_block_manager *bm)
630{
631 bm->read_only = false;
632}
633EXPORT_SYMBOL_GPL(dm_bm_set_read_write);
634
629u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor) 635u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor)
630{ 636{
631 return crc32c(~(u32) 0, data, len) ^ init_xor; 637 return crc32c(~(u32) 0, data, len) ^ init_xor;
diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
index 9a82083a66b6..13cd58e1fe69 100644
--- a/drivers/md/persistent-data/dm-block-manager.h
+++ b/drivers/md/persistent-data/dm-block-manager.h
@@ -108,9 +108,9 @@ int dm_bm_unlock(struct dm_block *b);
108int dm_bm_flush_and_unlock(struct dm_block_manager *bm, 108int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
109 struct dm_block *superblock); 109 struct dm_block *superblock);
110 110
111 /* 111/*
112 * Request data be prefetched into the cache. 112 * Request data is prefetched into the cache.
113 */ 113 */
114void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b); 114void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
115 115
116/* 116/*
@@ -125,6 +125,7 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
125 * be returned if you do. 125 * be returned if you do.
126 */ 126 */
127void dm_bm_set_read_only(struct dm_block_manager *bm); 127void dm_bm_set_read_only(struct dm_block_manager *bm);
128void dm_bm_set_read_write(struct dm_block_manager *bm);
128 129
129u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor); 130u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor);
130 131
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 6058569fe86c..466a60bbd716 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -381,7 +381,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
381} 381}
382 382
383static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b, 383static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
384 uint32_t (*mutator)(void *context, uint32_t old), 384 int (*mutator)(void *context, uint32_t old, uint32_t *new),
385 void *context, enum allocation_event *ev) 385 void *context, enum allocation_event *ev)
386{ 386{
387 int r; 387 int r;
@@ -410,11 +410,17 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
410 410
411 if (old > 2) { 411 if (old > 2) {
412 r = sm_ll_lookup_big_ref_count(ll, b, &old); 412 r = sm_ll_lookup_big_ref_count(ll, b, &old);
413 if (r < 0) 413 if (r < 0) {
414 dm_tm_unlock(ll->tm, nb);
414 return r; 415 return r;
416 }
415 } 417 }
416 418
417 ref_count = mutator(context, old); 419 r = mutator(context, old, &ref_count);
420 if (r) {
421 dm_tm_unlock(ll->tm, nb);
422 return r;
423 }
418 424
419 if (ref_count <= 2) { 425 if (ref_count <= 2) {
420 sm_set_bitmap(bm_le, bit, ref_count); 426 sm_set_bitmap(bm_le, bit, ref_count);
@@ -465,9 +471,10 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
465 return ll->save_ie(ll, index, &ie_disk); 471 return ll->save_ie(ll, index, &ie_disk);
466} 472}
467 473
468static uint32_t set_ref_count(void *context, uint32_t old) 474static int set_ref_count(void *context, uint32_t old, uint32_t *new)
469{ 475{
470 return *((uint32_t *) context); 476 *new = *((uint32_t *) context);
477 return 0;
471} 478}
472 479
473int sm_ll_insert(struct ll_disk *ll, dm_block_t b, 480int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
@@ -476,9 +483,10 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
476 return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev); 483 return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev);
477} 484}
478 485
479static uint32_t inc_ref_count(void *context, uint32_t old) 486static int inc_ref_count(void *context, uint32_t old, uint32_t *new)
480{ 487{
481 return old + 1; 488 *new = old + 1;
489 return 0;
482} 490}
483 491
484int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) 492int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
@@ -486,9 +494,15 @@ int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
486 return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev); 494 return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev);
487} 495}
488 496
489static uint32_t dec_ref_count(void *context, uint32_t old) 497static int dec_ref_count(void *context, uint32_t old, uint32_t *new)
490{ 498{
491 return old - 1; 499 if (!old) {
500 DMERR_LIMIT("unable to decrement a reference count below 0");
501 return -EINVAL;
502 }
503
504 *new = old - 1;
505 return 0;
492} 506}
493 507
494int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) 508int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 1c959684caef..58fc1eef7499 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -384,12 +384,16 @@ static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
384 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); 384 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
385 385
386 int r = sm_metadata_new_block_(sm, b); 386 int r = sm_metadata_new_block_(sm, b);
387 if (r) 387 if (r) {
388 DMERR("unable to allocate new metadata block"); 388 DMERR("unable to allocate new metadata block");
389 return r;
390 }
389 391
390 r = sm_metadata_get_nr_free(sm, &count); 392 r = sm_metadata_get_nr_free(sm, &count);
391 if (r) 393 if (r) {
392 DMERR("couldn't get free block count"); 394 DMERR("couldn't get free block count");
395 return r;
396 }
393 397
394 check_threshold(&smm->threshold, count); 398 check_threshold(&smm->threshold, count);
395 399
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index af6681b19776..1e5a540995e9 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -66,7 +66,8 @@
66 */ 66 */
67static int max_queued_requests = 1024; 67static int max_queued_requests = 1024;
68 68
69static void allow_barrier(struct r1conf *conf); 69static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
70 sector_t bi_sector);
70static void lower_barrier(struct r1conf *conf); 71static void lower_barrier(struct r1conf *conf);
71 72
72static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) 73static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
@@ -84,10 +85,12 @@ static void r1bio_pool_free(void *r1_bio, void *data)
84} 85}
85 86
86#define RESYNC_BLOCK_SIZE (64*1024) 87#define RESYNC_BLOCK_SIZE (64*1024)
87//#define RESYNC_BLOCK_SIZE PAGE_SIZE 88#define RESYNC_DEPTH 32
88#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9) 89#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
89#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) 90#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
90#define RESYNC_WINDOW (2048*1024) 91#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
92#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
93#define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)
91 94
92static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) 95static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
93{ 96{
@@ -225,6 +228,8 @@ static void call_bio_endio(struct r1bio *r1_bio)
225 struct bio *bio = r1_bio->master_bio; 228 struct bio *bio = r1_bio->master_bio;
226 int done; 229 int done;
227 struct r1conf *conf = r1_bio->mddev->private; 230 struct r1conf *conf = r1_bio->mddev->private;
231 sector_t start_next_window = r1_bio->start_next_window;
232 sector_t bi_sector = bio->bi_sector;
228 233
229 if (bio->bi_phys_segments) { 234 if (bio->bi_phys_segments) {
230 unsigned long flags; 235 unsigned long flags;
@@ -232,6 +237,11 @@ static void call_bio_endio(struct r1bio *r1_bio)
232 bio->bi_phys_segments--; 237 bio->bi_phys_segments--;
233 done = (bio->bi_phys_segments == 0); 238 done = (bio->bi_phys_segments == 0);
234 spin_unlock_irqrestore(&conf->device_lock, flags); 239 spin_unlock_irqrestore(&conf->device_lock, flags);
240 /*
241 * make_request() might be waiting for
242 * bi_phys_segments to decrease
243 */
244 wake_up(&conf->wait_barrier);
235 } else 245 } else
236 done = 1; 246 done = 1;
237 247
@@ -243,7 +253,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
243 * Wake up any possible resync thread that waits for the device 253 * Wake up any possible resync thread that waits for the device
244 * to go idle. 254 * to go idle.
245 */ 255 */
246 allow_barrier(conf); 256 allow_barrier(conf, start_next_window, bi_sector);
247 } 257 }
248} 258}
249 259
@@ -814,8 +824,6 @@ static void flush_pending_writes(struct r1conf *conf)
814 * there is no normal IO happeing. It must arrange to call 824 * there is no normal IO happeing. It must arrange to call
815 * lower_barrier when the particular background IO completes. 825 * lower_barrier when the particular background IO completes.
816 */ 826 */
817#define RESYNC_DEPTH 32
818
819static void raise_barrier(struct r1conf *conf) 827static void raise_barrier(struct r1conf *conf)
820{ 828{
821 spin_lock_irq(&conf->resync_lock); 829 spin_lock_irq(&conf->resync_lock);
@@ -827,9 +835,19 @@ static void raise_barrier(struct r1conf *conf)
827 /* block any new IO from starting */ 835 /* block any new IO from starting */
828 conf->barrier++; 836 conf->barrier++;
829 837
830 /* Now wait for all pending IO to complete */ 838 /* For these conditions we must wait:
839 * A: while the array is in frozen state
840 * B: while barrier >= RESYNC_DEPTH, meaning resync reach
841 * the max count which allowed.
842 * C: next_resync + RESYNC_SECTORS > start_next_window, meaning
843 * next resync will reach to the window which normal bios are
844 * handling.
845 */
831 wait_event_lock_irq(conf->wait_barrier, 846 wait_event_lock_irq(conf->wait_barrier,
832 !conf->nr_pending && conf->barrier < RESYNC_DEPTH, 847 !conf->array_frozen &&
848 conf->barrier < RESYNC_DEPTH &&
849 (conf->start_next_window >=
850 conf->next_resync + RESYNC_SECTORS),
833 conf->resync_lock); 851 conf->resync_lock);
834 852
835 spin_unlock_irq(&conf->resync_lock); 853 spin_unlock_irq(&conf->resync_lock);
@@ -845,10 +863,33 @@ static void lower_barrier(struct r1conf *conf)
845 wake_up(&conf->wait_barrier); 863 wake_up(&conf->wait_barrier);
846} 864}
847 865
848static void wait_barrier(struct r1conf *conf) 866static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
849{ 867{
868 bool wait = false;
869
870 if (conf->array_frozen || !bio)
871 wait = true;
872 else if (conf->barrier && bio_data_dir(bio) == WRITE) {
873 if (conf->next_resync < RESYNC_WINDOW_SECTORS)
874 wait = true;
875 else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
876 >= bio_end_sector(bio)) ||
877 (conf->next_resync + NEXT_NORMALIO_DISTANCE
878 <= bio->bi_sector))
879 wait = false;
880 else
881 wait = true;
882 }
883
884 return wait;
885}
886
887static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
888{
889 sector_t sector = 0;
890
850 spin_lock_irq(&conf->resync_lock); 891 spin_lock_irq(&conf->resync_lock);
851 if (conf->barrier) { 892 if (need_to_wait_for_sync(conf, bio)) {
852 conf->nr_waiting++; 893 conf->nr_waiting++;
853 /* Wait for the barrier to drop. 894 /* Wait for the barrier to drop.
854 * However if there are already pending 895 * However if there are already pending
@@ -860,22 +901,67 @@ static void wait_barrier(struct r1conf *conf)
860 * count down. 901 * count down.
861 */ 902 */
862 wait_event_lock_irq(conf->wait_barrier, 903 wait_event_lock_irq(conf->wait_barrier,
863 !conf->barrier || 904 !conf->array_frozen &&
864 (conf->nr_pending && 905 (!conf->barrier ||
906 ((conf->start_next_window <
907 conf->next_resync + RESYNC_SECTORS) &&
865 current->bio_list && 908 current->bio_list &&
866 !bio_list_empty(current->bio_list)), 909 !bio_list_empty(current->bio_list))),
867 conf->resync_lock); 910 conf->resync_lock);
868 conf->nr_waiting--; 911 conf->nr_waiting--;
869 } 912 }
913
914 if (bio && bio_data_dir(bio) == WRITE) {
915 if (conf->next_resync + NEXT_NORMALIO_DISTANCE
916 <= bio->bi_sector) {
917 if (conf->start_next_window == MaxSector)
918 conf->start_next_window =
919 conf->next_resync +
920 NEXT_NORMALIO_DISTANCE;
921
922 if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
923 <= bio->bi_sector)
924 conf->next_window_requests++;
925 else
926 conf->current_window_requests++;
927 }
928 if (bio->bi_sector >= conf->start_next_window)
929 sector = conf->start_next_window;
930 }
931
870 conf->nr_pending++; 932 conf->nr_pending++;
871 spin_unlock_irq(&conf->resync_lock); 933 spin_unlock_irq(&conf->resync_lock);
934 return sector;
872} 935}
873 936
874static void allow_barrier(struct r1conf *conf) 937static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
938 sector_t bi_sector)
875{ 939{
876 unsigned long flags; 940 unsigned long flags;
941
877 spin_lock_irqsave(&conf->resync_lock, flags); 942 spin_lock_irqsave(&conf->resync_lock, flags);
878 conf->nr_pending--; 943 conf->nr_pending--;
944 if (start_next_window) {
945 if (start_next_window == conf->start_next_window) {
946 if (conf->start_next_window + NEXT_NORMALIO_DISTANCE
947 <= bi_sector)
948 conf->next_window_requests--;
949 else
950 conf->current_window_requests--;
951 } else
952 conf->current_window_requests--;
953
954 if (!conf->current_window_requests) {
955 if (conf->next_window_requests) {
956 conf->current_window_requests =
957 conf->next_window_requests;
958 conf->next_window_requests = 0;
959 conf->start_next_window +=
960 NEXT_NORMALIO_DISTANCE;
961 } else
962 conf->start_next_window = MaxSector;
963 }
964 }
879 spin_unlock_irqrestore(&conf->resync_lock, flags); 965 spin_unlock_irqrestore(&conf->resync_lock, flags);
880 wake_up(&conf->wait_barrier); 966 wake_up(&conf->wait_barrier);
881} 967}
@@ -884,8 +970,7 @@ static void freeze_array(struct r1conf *conf, int extra)
884{ 970{
885 /* stop syncio and normal IO and wait for everything to 971 /* stop syncio and normal IO and wait for everything to
886 * go quite. 972 * go quite.
887 * We increment barrier and nr_waiting, and then 973 * We wait until nr_pending match nr_queued+extra
888 * wait until nr_pending match nr_queued+extra
889 * This is called in the context of one normal IO request 974 * This is called in the context of one normal IO request
890 * that has failed. Thus any sync request that might be pending 975 * that has failed. Thus any sync request that might be pending
891 * will be blocked by nr_pending, and we need to wait for 976 * will be blocked by nr_pending, and we need to wait for
@@ -895,8 +980,7 @@ static void freeze_array(struct r1conf *conf, int extra)
895 * we continue. 980 * we continue.
896 */ 981 */
897 spin_lock_irq(&conf->resync_lock); 982 spin_lock_irq(&conf->resync_lock);
898 conf->barrier++; 983 conf->array_frozen = 1;
899 conf->nr_waiting++;
900 wait_event_lock_irq_cmd(conf->wait_barrier, 984 wait_event_lock_irq_cmd(conf->wait_barrier,
901 conf->nr_pending == conf->nr_queued+extra, 985 conf->nr_pending == conf->nr_queued+extra,
902 conf->resync_lock, 986 conf->resync_lock,
@@ -907,8 +991,7 @@ static void unfreeze_array(struct r1conf *conf)
907{ 991{
908 /* reverse the effect of the freeze */ 992 /* reverse the effect of the freeze */
909 spin_lock_irq(&conf->resync_lock); 993 spin_lock_irq(&conf->resync_lock);
910 conf->barrier--; 994 conf->array_frozen = 0;
911 conf->nr_waiting--;
912 wake_up(&conf->wait_barrier); 995 wake_up(&conf->wait_barrier);
913 spin_unlock_irq(&conf->resync_lock); 996 spin_unlock_irq(&conf->resync_lock);
914} 997}
@@ -1013,6 +1096,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1013 int first_clone; 1096 int first_clone;
1014 int sectors_handled; 1097 int sectors_handled;
1015 int max_sectors; 1098 int max_sectors;
1099 sector_t start_next_window;
1016 1100
1017 /* 1101 /*
1018 * Register the new request and wait if the reconstruction 1102 * Register the new request and wait if the reconstruction
@@ -1042,7 +1126,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1042 finish_wait(&conf->wait_barrier, &w); 1126 finish_wait(&conf->wait_barrier, &w);
1043 } 1127 }
1044 1128
1045 wait_barrier(conf); 1129 start_next_window = wait_barrier(conf, bio);
1046 1130
1047 bitmap = mddev->bitmap; 1131 bitmap = mddev->bitmap;
1048 1132
@@ -1163,6 +1247,7 @@ read_again:
1163 1247
1164 disks = conf->raid_disks * 2; 1248 disks = conf->raid_disks * 2;
1165 retry_write: 1249 retry_write:
1250 r1_bio->start_next_window = start_next_window;
1166 blocked_rdev = NULL; 1251 blocked_rdev = NULL;
1167 rcu_read_lock(); 1252 rcu_read_lock();
1168 max_sectors = r1_bio->sectors; 1253 max_sectors = r1_bio->sectors;
@@ -1231,14 +1316,24 @@ read_again:
1231 if (unlikely(blocked_rdev)) { 1316 if (unlikely(blocked_rdev)) {
1232 /* Wait for this device to become unblocked */ 1317 /* Wait for this device to become unblocked */
1233 int j; 1318 int j;
1319 sector_t old = start_next_window;
1234 1320
1235 for (j = 0; j < i; j++) 1321 for (j = 0; j < i; j++)
1236 if (r1_bio->bios[j]) 1322 if (r1_bio->bios[j])
1237 rdev_dec_pending(conf->mirrors[j].rdev, mddev); 1323 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1238 r1_bio->state = 0; 1324 r1_bio->state = 0;
1239 allow_barrier(conf); 1325 allow_barrier(conf, start_next_window, bio->bi_sector);
1240 md_wait_for_blocked_rdev(blocked_rdev, mddev); 1326 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1241 wait_barrier(conf); 1327 start_next_window = wait_barrier(conf, bio);
1328 /*
1329 * We must make sure the multi r1bios of bio have
1330 * the same value of bi_phys_segments
1331 */
1332 if (bio->bi_phys_segments && old &&
1333 old != start_next_window)
1334 /* Wait for the former r1bio(s) to complete */
1335 wait_event(conf->wait_barrier,
1336 bio->bi_phys_segments == 1);
1242 goto retry_write; 1337 goto retry_write;
1243 } 1338 }
1244 1339
@@ -1438,11 +1533,14 @@ static void print_conf(struct r1conf *conf)
1438 1533
1439static void close_sync(struct r1conf *conf) 1534static void close_sync(struct r1conf *conf)
1440{ 1535{
1441 wait_barrier(conf); 1536 wait_barrier(conf, NULL);
1442 allow_barrier(conf); 1537 allow_barrier(conf, 0, 0);
1443 1538
1444 mempool_destroy(conf->r1buf_pool); 1539 mempool_destroy(conf->r1buf_pool);
1445 conf->r1buf_pool = NULL; 1540 conf->r1buf_pool = NULL;
1541
1542 conf->next_resync = 0;
1543 conf->start_next_window = MaxSector;
1446} 1544}
1447 1545
1448static int raid1_spare_active(struct mddev *mddev) 1546static int raid1_spare_active(struct mddev *mddev)
@@ -2714,6 +2812,9 @@ static struct r1conf *setup_conf(struct mddev *mddev)
2714 conf->pending_count = 0; 2812 conf->pending_count = 0;
2715 conf->recovery_disabled = mddev->recovery_disabled - 1; 2813 conf->recovery_disabled = mddev->recovery_disabled - 1;
2716 2814
2815 conf->start_next_window = MaxSector;
2816 conf->current_window_requests = conf->next_window_requests = 0;
2817
2717 err = -EIO; 2818 err = -EIO;
2718 for (i = 0; i < conf->raid_disks * 2; i++) { 2819 for (i = 0; i < conf->raid_disks * 2; i++) {
2719 2820
@@ -2871,8 +2972,8 @@ static int stop(struct mddev *mddev)
2871 atomic_read(&bitmap->behind_writes) == 0); 2972 atomic_read(&bitmap->behind_writes) == 0);
2872 } 2973 }
2873 2974
2874 raise_barrier(conf); 2975 freeze_array(conf, 0);
2875 lower_barrier(conf); 2976 unfreeze_array(conf);
2876 2977
2877 md_unregister_thread(&mddev->thread); 2978 md_unregister_thread(&mddev->thread);
2878 if (conf->r1bio_pool) 2979 if (conf->r1bio_pool)
@@ -3031,10 +3132,10 @@ static void raid1_quiesce(struct mddev *mddev, int state)
3031 wake_up(&conf->wait_barrier); 3132 wake_up(&conf->wait_barrier);
3032 break; 3133 break;
3033 case 1: 3134 case 1:
3034 raise_barrier(conf); 3135 freeze_array(conf, 0);
3035 break; 3136 break;
3036 case 0: 3137 case 0:
3037 lower_barrier(conf); 3138 unfreeze_array(conf);
3038 break; 3139 break;
3039 } 3140 }
3040} 3141}
@@ -3051,7 +3152,8 @@ static void *raid1_takeover(struct mddev *mddev)
3051 mddev->new_chunk_sectors = 0; 3152 mddev->new_chunk_sectors = 0;
3052 conf = setup_conf(mddev); 3153 conf = setup_conf(mddev);
3053 if (!IS_ERR(conf)) 3154 if (!IS_ERR(conf))
3054 conf->barrier = 1; 3155 /* Array must appear to be quiesced */
3156 conf->array_frozen = 1;
3055 return conf; 3157 return conf;
3056 } 3158 }
3057 return ERR_PTR(-EINVAL); 3159 return ERR_PTR(-EINVAL);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 0ff3715fb7eb..9bebca7bff2f 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -41,6 +41,19 @@ struct r1conf {
41 */ 41 */
42 sector_t next_resync; 42 sector_t next_resync;
43 43
44 /* When raid1 starts resync, we divide array into four partitions
45 * |---------|--------------|---------------------|-------------|
46 * next_resync start_next_window end_window
47 * start_next_window = next_resync + NEXT_NORMALIO_DISTANCE
48 * end_window = start_next_window + NEXT_NORMALIO_DISTANCE
49 * current_window_requests means the count of normalIO between
50 * start_next_window and end_window.
51 * next_window_requests means the count of normalIO after end_window.
52 * */
53 sector_t start_next_window;
54 int current_window_requests;
55 int next_window_requests;
56
44 spinlock_t device_lock; 57 spinlock_t device_lock;
45 58
46 /* list of 'struct r1bio' that need to be processed by raid1d, 59 /* list of 'struct r1bio' that need to be processed by raid1d,
@@ -65,6 +78,7 @@ struct r1conf {
65 int nr_waiting; 78 int nr_waiting;
66 int nr_queued; 79 int nr_queued;
67 int barrier; 80 int barrier;
81 int array_frozen;
68 82
69 /* Set to 1 if a full sync is needed, (fresh device added). 83 /* Set to 1 if a full sync is needed, (fresh device added).
70 * Cleared when a sync completes. 84 * Cleared when a sync completes.
@@ -111,6 +125,7 @@ struct r1bio {
111 * in this BehindIO request 125 * in this BehindIO request
112 */ 126 */
113 sector_t sector; 127 sector_t sector;
128 sector_t start_next_window;
114 int sectors; 129 int sectors;
115 unsigned long state; 130 unsigned long state;
116 struct mddev *mddev; 131 struct mddev *mddev;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7c3508abb5e1..c504e8389e69 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4384,7 +4384,11 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4384 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4384 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4385 md_wakeup_thread(mddev->thread); 4385 md_wakeup_thread(mddev->thread);
4386 wait_event(mddev->sb_wait, mddev->flags == 0 || 4386 wait_event(mddev->sb_wait, mddev->flags == 0 ||
4387 kthread_should_stop()); 4387 test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4388 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4389 allow_barrier(conf);
4390 return sectors_done;
4391 }
4388 conf->reshape_safe = mddev->reshape_position; 4392 conf->reshape_safe = mddev->reshape_position;
4389 allow_barrier(conf); 4393 allow_barrier(conf);
4390 } 4394 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7f0e17a27aeb..cc055da02e2a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -85,6 +85,42 @@ static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
85 return &conf->stripe_hashtbl[hash]; 85 return &conf->stripe_hashtbl[hash];
86} 86}
87 87
88static inline int stripe_hash_locks_hash(sector_t sect)
89{
90 return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
91}
92
93static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
94{
95 spin_lock_irq(conf->hash_locks + hash);
96 spin_lock(&conf->device_lock);
97}
98
99static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
100{
101 spin_unlock(&conf->device_lock);
102 spin_unlock_irq(conf->hash_locks + hash);
103}
104
105static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
106{
107 int i;
108 local_irq_disable();
109 spin_lock(conf->hash_locks);
110 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
111 spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
112 spin_lock(&conf->device_lock);
113}
114
115static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
116{
117 int i;
118 spin_unlock(&conf->device_lock);
119 for (i = NR_STRIPE_HASH_LOCKS; i; i--)
120 spin_unlock(conf->hash_locks + i - 1);
121 local_irq_enable();
122}
123
88/* bio's attached to a stripe+device for I/O are linked together in bi_sector 124/* bio's attached to a stripe+device for I/O are linked together in bi_sector
89 * order without overlap. There may be several bio's per stripe+device, and 125 * order without overlap. There may be several bio's per stripe+device, and
90 * a bio could span several devices. 126 * a bio could span several devices.
@@ -249,7 +285,8 @@ static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
249 } 285 }
250} 286}
251 287
252static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh) 288static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
289 struct list_head *temp_inactive_list)
253{ 290{
254 BUG_ON(!list_empty(&sh->lru)); 291 BUG_ON(!list_empty(&sh->lru));
255 BUG_ON(atomic_read(&conf->active_stripes)==0); 292 BUG_ON(atomic_read(&conf->active_stripes)==0);
@@ -278,23 +315,68 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
278 < IO_THRESHOLD) 315 < IO_THRESHOLD)
279 md_wakeup_thread(conf->mddev->thread); 316 md_wakeup_thread(conf->mddev->thread);
280 atomic_dec(&conf->active_stripes); 317 atomic_dec(&conf->active_stripes);
281 if (!test_bit(STRIPE_EXPANDING, &sh->state)) { 318 if (!test_bit(STRIPE_EXPANDING, &sh->state))
282 list_add_tail(&sh->lru, &conf->inactive_list); 319 list_add_tail(&sh->lru, temp_inactive_list);
283 wake_up(&conf->wait_for_stripe);
284 if (conf->retry_read_aligned)
285 md_wakeup_thread(conf->mddev->thread);
286 }
287 } 320 }
288} 321}
289 322
290static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) 323static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
324 struct list_head *temp_inactive_list)
291{ 325{
292 if (atomic_dec_and_test(&sh->count)) 326 if (atomic_dec_and_test(&sh->count))
293 do_release_stripe(conf, sh); 327 do_release_stripe(conf, sh, temp_inactive_list);
328}
329
330/*
331 * @hash could be NR_STRIPE_HASH_LOCKS, then we have a list of inactive_list
332 *
333 * Be careful: Only one task can add/delete stripes from temp_inactive_list at
334 * given time. Adding stripes only takes device lock, while deleting stripes
335 * only takes hash lock.
336 */
337static void release_inactive_stripe_list(struct r5conf *conf,
338 struct list_head *temp_inactive_list,
339 int hash)
340{
341 int size;
342 bool do_wakeup = false;
343 unsigned long flags;
344
345 if (hash == NR_STRIPE_HASH_LOCKS) {
346 size = NR_STRIPE_HASH_LOCKS;
347 hash = NR_STRIPE_HASH_LOCKS - 1;
348 } else
349 size = 1;
350 while (size) {
351 struct list_head *list = &temp_inactive_list[size - 1];
352
353 /*
354 * We don't hold any lock here yet, get_active_stripe() might
355 * remove stripes from the list
356 */
357 if (!list_empty_careful(list)) {
358 spin_lock_irqsave(conf->hash_locks + hash, flags);
359 if (list_empty(conf->inactive_list + hash) &&
360 !list_empty(list))
361 atomic_dec(&conf->empty_inactive_list_nr);
362 list_splice_tail_init(list, conf->inactive_list + hash);
363 do_wakeup = true;
364 spin_unlock_irqrestore(conf->hash_locks + hash, flags);
365 }
366 size--;
367 hash--;
368 }
369
370 if (do_wakeup) {
371 wake_up(&conf->wait_for_stripe);
372 if (conf->retry_read_aligned)
373 md_wakeup_thread(conf->mddev->thread);
374 }
294} 375}
295 376
296/* should hold conf->device_lock already */ 377/* should hold conf->device_lock already */
297static int release_stripe_list(struct r5conf *conf) 378static int release_stripe_list(struct r5conf *conf,
379 struct list_head *temp_inactive_list)
298{ 380{
299 struct stripe_head *sh; 381 struct stripe_head *sh;
300 int count = 0; 382 int count = 0;
@@ -303,6 +385,8 @@ static int release_stripe_list(struct r5conf *conf)
303 head = llist_del_all(&conf->released_stripes); 385 head = llist_del_all(&conf->released_stripes);
304 head = llist_reverse_order(head); 386 head = llist_reverse_order(head);
305 while (head) { 387 while (head) {
388 int hash;
389
306 sh = llist_entry(head, struct stripe_head, release_list); 390 sh = llist_entry(head, struct stripe_head, release_list);
307 head = llist_next(head); 391 head = llist_next(head);
308 /* sh could be readded after STRIPE_ON_RELEASE_LIST is cleard */ 392 /* sh could be readded after STRIPE_ON_RELEASE_LIST is cleard */
@@ -313,7 +397,8 @@ static int release_stripe_list(struct r5conf *conf)
313 * again, the count is always > 1. This is true for 397 * again, the count is always > 1. This is true for
314 * STRIPE_ON_UNPLUG_LIST bit too. 398 * STRIPE_ON_UNPLUG_LIST bit too.
315 */ 399 */
316 __release_stripe(conf, sh); 400 hash = sh->hash_lock_index;
401 __release_stripe(conf, sh, &temp_inactive_list[hash]);
317 count++; 402 count++;
318 } 403 }
319 404
@@ -324,9 +409,12 @@ static void release_stripe(struct stripe_head *sh)
324{ 409{
325 struct r5conf *conf = sh->raid_conf; 410 struct r5conf *conf = sh->raid_conf;
326 unsigned long flags; 411 unsigned long flags;
412 struct list_head list;
413 int hash;
327 bool wakeup; 414 bool wakeup;
328 415
329 if (test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) 416 if (unlikely(!conf->mddev->thread) ||
417 test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
330 goto slow_path; 418 goto slow_path;
331 wakeup = llist_add(&sh->release_list, &conf->released_stripes); 419 wakeup = llist_add(&sh->release_list, &conf->released_stripes);
332 if (wakeup) 420 if (wakeup)
@@ -336,8 +424,11 @@ slow_path:
336 local_irq_save(flags); 424 local_irq_save(flags);
337 /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */ 425 /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
338 if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { 426 if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
339 do_release_stripe(conf, sh); 427 INIT_LIST_HEAD(&list);
428 hash = sh->hash_lock_index;
429 do_release_stripe(conf, sh, &list);
340 spin_unlock(&conf->device_lock); 430 spin_unlock(&conf->device_lock);
431 release_inactive_stripe_list(conf, &list, hash);
341 } 432 }
342 local_irq_restore(flags); 433 local_irq_restore(flags);
343} 434}
@@ -362,18 +453,21 @@ static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
362 453
363 454
364/* find an idle stripe, make sure it is unhashed, and return it. */ 455/* find an idle stripe, make sure it is unhashed, and return it. */
365static struct stripe_head *get_free_stripe(struct r5conf *conf) 456static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
366{ 457{
367 struct stripe_head *sh = NULL; 458 struct stripe_head *sh = NULL;
368 struct list_head *first; 459 struct list_head *first;
369 460
370 if (list_empty(&conf->inactive_list)) 461 if (list_empty(conf->inactive_list + hash))
371 goto out; 462 goto out;
372 first = conf->inactive_list.next; 463 first = (conf->inactive_list + hash)->next;
373 sh = list_entry(first, struct stripe_head, lru); 464 sh = list_entry(first, struct stripe_head, lru);
374 list_del_init(first); 465 list_del_init(first);
375 remove_hash(sh); 466 remove_hash(sh);
376 atomic_inc(&conf->active_stripes); 467 atomic_inc(&conf->active_stripes);
468 BUG_ON(hash != sh->hash_lock_index);
469 if (list_empty(conf->inactive_list + hash))
470 atomic_inc(&conf->empty_inactive_list_nr);
377out: 471out:
378 return sh; 472 return sh;
379} 473}
@@ -416,7 +510,7 @@ static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
416static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) 510static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
417{ 511{
418 struct r5conf *conf = sh->raid_conf; 512 struct r5conf *conf = sh->raid_conf;
419 int i; 513 int i, seq;
420 514
421 BUG_ON(atomic_read(&sh->count) != 0); 515 BUG_ON(atomic_read(&sh->count) != 0);
422 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); 516 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
@@ -426,7 +520,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
426 (unsigned long long)sh->sector); 520 (unsigned long long)sh->sector);
427 521
428 remove_hash(sh); 522 remove_hash(sh);
429 523retry:
524 seq = read_seqcount_begin(&conf->gen_lock);
430 sh->generation = conf->generation - previous; 525 sh->generation = conf->generation - previous;
431 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; 526 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
432 sh->sector = sector; 527 sh->sector = sector;
@@ -448,6 +543,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
448 dev->flags = 0; 543 dev->flags = 0;
449 raid5_build_block(sh, i, previous); 544 raid5_build_block(sh, i, previous);
450 } 545 }
546 if (read_seqcount_retry(&conf->gen_lock, seq))
547 goto retry;
451 insert_hash(conf, sh); 548 insert_hash(conf, sh);
452 sh->cpu = smp_processor_id(); 549 sh->cpu = smp_processor_id();
453} 550}
@@ -552,57 +649,59 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
552 int previous, int noblock, int noquiesce) 649 int previous, int noblock, int noquiesce)
553{ 650{
554 struct stripe_head *sh; 651 struct stripe_head *sh;
652 int hash = stripe_hash_locks_hash(sector);
555 653
556 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); 654 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
557 655
558 spin_lock_irq(&conf->device_lock); 656 spin_lock_irq(conf->hash_locks + hash);
559 657
560 do { 658 do {
561 wait_event_lock_irq(conf->wait_for_stripe, 659 wait_event_lock_irq(conf->wait_for_stripe,
562 conf->quiesce == 0 || noquiesce, 660 conf->quiesce == 0 || noquiesce,
563 conf->device_lock); 661 *(conf->hash_locks + hash));
564 sh = __find_stripe(conf, sector, conf->generation - previous); 662 sh = __find_stripe(conf, sector, conf->generation - previous);
565 if (!sh) { 663 if (!sh) {
566 if (!conf->inactive_blocked) 664 if (!conf->inactive_blocked)
567 sh = get_free_stripe(conf); 665 sh = get_free_stripe(conf, hash);
568 if (noblock && sh == NULL) 666 if (noblock && sh == NULL)
569 break; 667 break;
570 if (!sh) { 668 if (!sh) {
571 conf->inactive_blocked = 1; 669 conf->inactive_blocked = 1;
572 wait_event_lock_irq(conf->wait_for_stripe, 670 wait_event_lock_irq(
573 !list_empty(&conf->inactive_list) && 671 conf->wait_for_stripe,
574 (atomic_read(&conf->active_stripes) 672 !list_empty(conf->inactive_list + hash) &&
575 < (conf->max_nr_stripes *3/4) 673 (atomic_read(&conf->active_stripes)
576 || !conf->inactive_blocked), 674 < (conf->max_nr_stripes * 3 / 4)
577 conf->device_lock); 675 || !conf->inactive_blocked),
676 *(conf->hash_locks + hash));
578 conf->inactive_blocked = 0; 677 conf->inactive_blocked = 0;
579 } else 678 } else
580 init_stripe(sh, sector, previous); 679 init_stripe(sh, sector, previous);
581 } else { 680 } else {
681 spin_lock(&conf->device_lock);
582 if (atomic_read(&sh->count)) { 682 if (atomic_read(&sh->count)) {
583 BUG_ON(!list_empty(&sh->lru) 683 BUG_ON(!list_empty(&sh->lru)
584 && !test_bit(STRIPE_EXPANDING, &sh->state) 684 && !test_bit(STRIPE_EXPANDING, &sh->state)
585 && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state) 685 && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)
586 && !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state)); 686 );
587 } else { 687 } else {
588 if (!test_bit(STRIPE_HANDLE, &sh->state)) 688 if (!test_bit(STRIPE_HANDLE, &sh->state))
589 atomic_inc(&conf->active_stripes); 689 atomic_inc(&conf->active_stripes);
590 if (list_empty(&sh->lru) && 690 BUG_ON(list_empty(&sh->lru));
591 !test_bit(STRIPE_EXPANDING, &sh->state))
592 BUG();
593 list_del_init(&sh->lru); 691 list_del_init(&sh->lru);
594 if (sh->group) { 692 if (sh->group) {
595 sh->group->stripes_cnt--; 693 sh->group->stripes_cnt--;
596 sh->group = NULL; 694 sh->group = NULL;
597 } 695 }
598 } 696 }
697 spin_unlock(&conf->device_lock);
599 } 698 }
600 } while (sh == NULL); 699 } while (sh == NULL);
601 700
602 if (sh) 701 if (sh)
603 atomic_inc(&sh->count); 702 atomic_inc(&sh->count);
604 703
605 spin_unlock_irq(&conf->device_lock); 704 spin_unlock_irq(conf->hash_locks + hash);
606 return sh; 705 return sh;
607} 706}
608 707
@@ -758,7 +857,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
758 bi->bi_sector = (sh->sector 857 bi->bi_sector = (sh->sector
759 + rdev->data_offset); 858 + rdev->data_offset);
760 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 859 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
761 bi->bi_rw |= REQ_FLUSH; 860 bi->bi_rw |= REQ_NOMERGE;
762 861
763 bi->bi_vcnt = 1; 862 bi->bi_vcnt = 1;
764 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 863 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
@@ -1582,7 +1681,7 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1582 put_cpu(); 1681 put_cpu();
1583} 1682}
1584 1683
1585static int grow_one_stripe(struct r5conf *conf) 1684static int grow_one_stripe(struct r5conf *conf, int hash)
1586{ 1685{
1587 struct stripe_head *sh; 1686 struct stripe_head *sh;
1588 sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL); 1687 sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
@@ -1598,6 +1697,7 @@ static int grow_one_stripe(struct r5conf *conf)
1598 kmem_cache_free(conf->slab_cache, sh); 1697 kmem_cache_free(conf->slab_cache, sh);
1599 return 0; 1698 return 0;
1600 } 1699 }
1700 sh->hash_lock_index = hash;
1601 /* we just created an active stripe so... */ 1701 /* we just created an active stripe so... */
1602 atomic_set(&sh->count, 1); 1702 atomic_set(&sh->count, 1);
1603 atomic_inc(&conf->active_stripes); 1703 atomic_inc(&conf->active_stripes);
@@ -1610,6 +1710,7 @@ static int grow_stripes(struct r5conf *conf, int num)
1610{ 1710{
1611 struct kmem_cache *sc; 1711 struct kmem_cache *sc;
1612 int devs = max(conf->raid_disks, conf->previous_raid_disks); 1712 int devs = max(conf->raid_disks, conf->previous_raid_disks);
1713 int hash;
1613 1714
1614 if (conf->mddev->gendisk) 1715 if (conf->mddev->gendisk)
1615 sprintf(conf->cache_name[0], 1716 sprintf(conf->cache_name[0],
@@ -1627,9 +1728,13 @@ static int grow_stripes(struct r5conf *conf, int num)
1627 return 1; 1728 return 1;
1628 conf->slab_cache = sc; 1729 conf->slab_cache = sc;
1629 conf->pool_size = devs; 1730 conf->pool_size = devs;
1630 while (num--) 1731 hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
1631 if (!grow_one_stripe(conf)) 1732 while (num--) {
1733 if (!grow_one_stripe(conf, hash))
1632 return 1; 1734 return 1;
1735 conf->max_nr_stripes++;
1736 hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
1737 }
1633 return 0; 1738 return 0;
1634} 1739}
1635 1740
@@ -1687,6 +1792,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
1687 int err; 1792 int err;
1688 struct kmem_cache *sc; 1793 struct kmem_cache *sc;
1689 int i; 1794 int i;
1795 int hash, cnt;
1690 1796
1691 if (newsize <= conf->pool_size) 1797 if (newsize <= conf->pool_size)
1692 return 0; /* never bother to shrink */ 1798 return 0; /* never bother to shrink */
@@ -1726,19 +1832,29 @@ static int resize_stripes(struct r5conf *conf, int newsize)
1726 * OK, we have enough stripes, start collecting inactive 1832 * OK, we have enough stripes, start collecting inactive
1727 * stripes and copying them over 1833 * stripes and copying them over
1728 */ 1834 */
1835 hash = 0;
1836 cnt = 0;
1729 list_for_each_entry(nsh, &newstripes, lru) { 1837 list_for_each_entry(nsh, &newstripes, lru) {
1730 spin_lock_irq(&conf->device_lock); 1838 lock_device_hash_lock(conf, hash);
1731 wait_event_lock_irq(conf->wait_for_stripe, 1839 wait_event_cmd(conf->wait_for_stripe,
1732 !list_empty(&conf->inactive_list), 1840 !list_empty(conf->inactive_list + hash),
1733 conf->device_lock); 1841 unlock_device_hash_lock(conf, hash),
1734 osh = get_free_stripe(conf); 1842 lock_device_hash_lock(conf, hash));
1735 spin_unlock_irq(&conf->device_lock); 1843 osh = get_free_stripe(conf, hash);
1844 unlock_device_hash_lock(conf, hash);
1736 atomic_set(&nsh->count, 1); 1845 atomic_set(&nsh->count, 1);
1737 for(i=0; i<conf->pool_size; i++) 1846 for(i=0; i<conf->pool_size; i++)
1738 nsh->dev[i].page = osh->dev[i].page; 1847 nsh->dev[i].page = osh->dev[i].page;
1739 for( ; i<newsize; i++) 1848 for( ; i<newsize; i++)
1740 nsh->dev[i].page = NULL; 1849 nsh->dev[i].page = NULL;
1850 nsh->hash_lock_index = hash;
1741 kmem_cache_free(conf->slab_cache, osh); 1851 kmem_cache_free(conf->slab_cache, osh);
1852 cnt++;
1853 if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS +
1854 !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) {
1855 hash++;
1856 cnt = 0;
1857 }
1742 } 1858 }
1743 kmem_cache_destroy(conf->slab_cache); 1859 kmem_cache_destroy(conf->slab_cache);
1744 1860
@@ -1797,13 +1913,13 @@ static int resize_stripes(struct r5conf *conf, int newsize)
1797 return err; 1913 return err;
1798} 1914}
1799 1915
1800static int drop_one_stripe(struct r5conf *conf) 1916static int drop_one_stripe(struct r5conf *conf, int hash)
1801{ 1917{
1802 struct stripe_head *sh; 1918 struct stripe_head *sh;
1803 1919
1804 spin_lock_irq(&conf->device_lock); 1920 spin_lock_irq(conf->hash_locks + hash);
1805 sh = get_free_stripe(conf); 1921 sh = get_free_stripe(conf, hash);
1806 spin_unlock_irq(&conf->device_lock); 1922 spin_unlock_irq(conf->hash_locks + hash);
1807 if (!sh) 1923 if (!sh)
1808 return 0; 1924 return 0;
1809 BUG_ON(atomic_read(&sh->count)); 1925 BUG_ON(atomic_read(&sh->count));
@@ -1815,8 +1931,10 @@ static int drop_one_stripe(struct r5conf *conf)
1815 1931
1816static void shrink_stripes(struct r5conf *conf) 1932static void shrink_stripes(struct r5conf *conf)
1817{ 1933{
1818 while (drop_one_stripe(conf)) 1934 int hash;
1819 ; 1935 for (hash = 0; hash < NR_STRIPE_HASH_LOCKS; hash++)
1936 while (drop_one_stripe(conf, hash))
1937 ;
1820 1938
1821 if (conf->slab_cache) 1939 if (conf->slab_cache)
1822 kmem_cache_destroy(conf->slab_cache); 1940 kmem_cache_destroy(conf->slab_cache);
@@ -1921,6 +2039,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
1921 mdname(conf->mddev), bdn); 2039 mdname(conf->mddev), bdn);
1922 else 2040 else
1923 retry = 1; 2041 retry = 1;
2042 if (set_bad && test_bit(In_sync, &rdev->flags)
2043 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
2044 retry = 1;
1924 if (retry) 2045 if (retry)
1925 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { 2046 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
1926 set_bit(R5_ReadError, &sh->dev[i].flags); 2047 set_bit(R5_ReadError, &sh->dev[i].flags);
@@ -3900,7 +4021,8 @@ static void raid5_activate_delayed(struct r5conf *conf)
3900 } 4021 }
3901} 4022}
3902 4023
3903static void activate_bit_delay(struct r5conf *conf) 4024static void activate_bit_delay(struct r5conf *conf,
4025 struct list_head *temp_inactive_list)
3904{ 4026{
3905 /* device_lock is held */ 4027 /* device_lock is held */
3906 struct list_head head; 4028 struct list_head head;
@@ -3908,9 +4030,11 @@ static void activate_bit_delay(struct r5conf *conf)
3908 list_del_init(&conf->bitmap_list); 4030 list_del_init(&conf->bitmap_list);
3909 while (!list_empty(&head)) { 4031 while (!list_empty(&head)) {
3910 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 4032 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
4033 int hash;
3911 list_del_init(&sh->lru); 4034 list_del_init(&sh->lru);
3912 atomic_inc(&sh->count); 4035 atomic_inc(&sh->count);
3913 __release_stripe(conf, sh); 4036 hash = sh->hash_lock_index;
4037 __release_stripe(conf, sh, &temp_inactive_list[hash]);
3914 } 4038 }
3915} 4039}
3916 4040
@@ -3926,7 +4050,7 @@ int md_raid5_congested(struct mddev *mddev, int bits)
3926 return 1; 4050 return 1;
3927 if (conf->quiesce) 4051 if (conf->quiesce)
3928 return 1; 4052 return 1;
3929 if (list_empty_careful(&conf->inactive_list)) 4053 if (atomic_read(&conf->empty_inactive_list_nr))
3930 return 1; 4054 return 1;
3931 4055
3932 return 0; 4056 return 0;
@@ -4256,6 +4380,7 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
4256struct raid5_plug_cb { 4380struct raid5_plug_cb {
4257 struct blk_plug_cb cb; 4381 struct blk_plug_cb cb;
4258 struct list_head list; 4382 struct list_head list;
4383 struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
4259}; 4384};
4260 4385
4261static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) 4386static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
@@ -4266,6 +4391,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
4266 struct mddev *mddev = cb->cb.data; 4391 struct mddev *mddev = cb->cb.data;
4267 struct r5conf *conf = mddev->private; 4392 struct r5conf *conf = mddev->private;
4268 int cnt = 0; 4393 int cnt = 0;
4394 int hash;
4269 4395
4270 if (cb->list.next && !list_empty(&cb->list)) { 4396 if (cb->list.next && !list_empty(&cb->list)) {
4271 spin_lock_irq(&conf->device_lock); 4397 spin_lock_irq(&conf->device_lock);
@@ -4283,11 +4409,14 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
4283 * STRIPE_ON_RELEASE_LIST could be set here. In that 4409 * STRIPE_ON_RELEASE_LIST could be set here. In that
4284 * case, the count is always > 1 here 4410 * case, the count is always > 1 here
4285 */ 4411 */
4286 __release_stripe(conf, sh); 4412 hash = sh->hash_lock_index;
4413 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]);
4287 cnt++; 4414 cnt++;
4288 } 4415 }
4289 spin_unlock_irq(&conf->device_lock); 4416 spin_unlock_irq(&conf->device_lock);
4290 } 4417 }
4418 release_inactive_stripe_list(conf, cb->temp_inactive_list,
4419 NR_STRIPE_HASH_LOCKS);
4291 if (mddev->queue) 4420 if (mddev->queue)
4292 trace_block_unplug(mddev->queue, cnt, !from_schedule); 4421 trace_block_unplug(mddev->queue, cnt, !from_schedule);
4293 kfree(cb); 4422 kfree(cb);
@@ -4308,8 +4437,12 @@ static void release_stripe_plug(struct mddev *mddev,
4308 4437
4309 cb = container_of(blk_cb, struct raid5_plug_cb, cb); 4438 cb = container_of(blk_cb, struct raid5_plug_cb, cb);
4310 4439
4311 if (cb->list.next == NULL) 4440 if (cb->list.next == NULL) {
4441 int i;
4312 INIT_LIST_HEAD(&cb->list); 4442 INIT_LIST_HEAD(&cb->list);
4443 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
4444 INIT_LIST_HEAD(cb->temp_inactive_list + i);
4445 }
4313 4446
4314 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) 4447 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
4315 list_add_tail(&sh->lru, &cb->list); 4448 list_add_tail(&sh->lru, &cb->list);
@@ -4692,14 +4825,19 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
4692 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { 4825 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4693 /* Cannot proceed until we've updated the superblock... */ 4826 /* Cannot proceed until we've updated the superblock... */
4694 wait_event(conf->wait_for_overlap, 4827 wait_event(conf->wait_for_overlap,
4695 atomic_read(&conf->reshape_stripes)==0); 4828 atomic_read(&conf->reshape_stripes)==0
4829 || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4830 if (atomic_read(&conf->reshape_stripes) != 0)
4831 return 0;
4696 mddev->reshape_position = conf->reshape_progress; 4832 mddev->reshape_position = conf->reshape_progress;
4697 mddev->curr_resync_completed = sector_nr; 4833 mddev->curr_resync_completed = sector_nr;
4698 conf->reshape_checkpoint = jiffies; 4834 conf->reshape_checkpoint = jiffies;
4699 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4835 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4700 md_wakeup_thread(mddev->thread); 4836 md_wakeup_thread(mddev->thread);
4701 wait_event(mddev->sb_wait, mddev->flags == 0 || 4837 wait_event(mddev->sb_wait, mddev->flags == 0 ||
4702 kthread_should_stop()); 4838 test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4839 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
4840 return 0;
4703 spin_lock_irq(&conf->device_lock); 4841 spin_lock_irq(&conf->device_lock);
4704 conf->reshape_safe = mddev->reshape_position; 4842 conf->reshape_safe = mddev->reshape_position;
4705 spin_unlock_irq(&conf->device_lock); 4843 spin_unlock_irq(&conf->device_lock);
@@ -4782,7 +4920,10 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
4782 >= mddev->resync_max - mddev->curr_resync_completed) { 4920 >= mddev->resync_max - mddev->curr_resync_completed) {
4783 /* Cannot proceed until we've updated the superblock... */ 4921 /* Cannot proceed until we've updated the superblock... */
4784 wait_event(conf->wait_for_overlap, 4922 wait_event(conf->wait_for_overlap,
4785 atomic_read(&conf->reshape_stripes) == 0); 4923 atomic_read(&conf->reshape_stripes) == 0
4924 || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4925 if (atomic_read(&conf->reshape_stripes) != 0)
4926 goto ret;
4786 mddev->reshape_position = conf->reshape_progress; 4927 mddev->reshape_position = conf->reshape_progress;
4787 mddev->curr_resync_completed = sector_nr; 4928 mddev->curr_resync_completed = sector_nr;
4788 conf->reshape_checkpoint = jiffies; 4929 conf->reshape_checkpoint = jiffies;
@@ -4790,13 +4931,16 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
4790 md_wakeup_thread(mddev->thread); 4931 md_wakeup_thread(mddev->thread);
4791 wait_event(mddev->sb_wait, 4932 wait_event(mddev->sb_wait,
4792 !test_bit(MD_CHANGE_DEVS, &mddev->flags) 4933 !test_bit(MD_CHANGE_DEVS, &mddev->flags)
4793 || kthread_should_stop()); 4934 || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4935 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
4936 goto ret;
4794 spin_lock_irq(&conf->device_lock); 4937 spin_lock_irq(&conf->device_lock);
4795 conf->reshape_safe = mddev->reshape_position; 4938 conf->reshape_safe = mddev->reshape_position;
4796 spin_unlock_irq(&conf->device_lock); 4939 spin_unlock_irq(&conf->device_lock);
4797 wake_up(&conf->wait_for_overlap); 4940 wake_up(&conf->wait_for_overlap);
4798 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 4941 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4799 } 4942 }
4943ret:
4800 return reshape_sectors; 4944 return reshape_sectors;
4801} 4945}
4802 4946
@@ -4954,27 +5098,45 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
4954} 5098}
4955 5099
4956static int handle_active_stripes(struct r5conf *conf, int group, 5100static int handle_active_stripes(struct r5conf *conf, int group,
4957 struct r5worker *worker) 5101 struct r5worker *worker,
5102 struct list_head *temp_inactive_list)
4958{ 5103{
4959 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; 5104 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
4960 int i, batch_size = 0; 5105 int i, batch_size = 0, hash;
5106 bool release_inactive = false;
4961 5107
4962 while (batch_size < MAX_STRIPE_BATCH && 5108 while (batch_size < MAX_STRIPE_BATCH &&
4963 (sh = __get_priority_stripe(conf, group)) != NULL) 5109 (sh = __get_priority_stripe(conf, group)) != NULL)
4964 batch[batch_size++] = sh; 5110 batch[batch_size++] = sh;
4965 5111
4966 if (batch_size == 0) 5112 if (batch_size == 0) {
4967 return batch_size; 5113 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
5114 if (!list_empty(temp_inactive_list + i))
5115 break;
5116 if (i == NR_STRIPE_HASH_LOCKS)
5117 return batch_size;
5118 release_inactive = true;
5119 }
4968 spin_unlock_irq(&conf->device_lock); 5120 spin_unlock_irq(&conf->device_lock);
4969 5121
5122 release_inactive_stripe_list(conf, temp_inactive_list,
5123 NR_STRIPE_HASH_LOCKS);
5124
5125 if (release_inactive) {
5126 spin_lock_irq(&conf->device_lock);
5127 return 0;
5128 }
5129
4970 for (i = 0; i < batch_size; i++) 5130 for (i = 0; i < batch_size; i++)
4971 handle_stripe(batch[i]); 5131 handle_stripe(batch[i]);
4972 5132
4973 cond_resched(); 5133 cond_resched();
4974 5134
4975 spin_lock_irq(&conf->device_lock); 5135 spin_lock_irq(&conf->device_lock);
4976 for (i = 0; i < batch_size; i++) 5136 for (i = 0; i < batch_size; i++) {
4977 __release_stripe(conf, batch[i]); 5137 hash = batch[i]->hash_lock_index;
5138 __release_stripe(conf, batch[i], &temp_inactive_list[hash]);
5139 }
4978 return batch_size; 5140 return batch_size;
4979} 5141}
4980 5142
@@ -4995,9 +5157,10 @@ static void raid5_do_work(struct work_struct *work)
4995 while (1) { 5157 while (1) {
4996 int batch_size, released; 5158 int batch_size, released;
4997 5159
4998 released = release_stripe_list(conf); 5160 released = release_stripe_list(conf, worker->temp_inactive_list);
4999 5161
5000 batch_size = handle_active_stripes(conf, group_id, worker); 5162 batch_size = handle_active_stripes(conf, group_id, worker,
5163 worker->temp_inactive_list);
5001 worker->working = false; 5164 worker->working = false;
5002 if (!batch_size && !released) 5165 if (!batch_size && !released)
5003 break; 5166 break;
@@ -5036,7 +5199,7 @@ static void raid5d(struct md_thread *thread)
5036 struct bio *bio; 5199 struct bio *bio;
5037 int batch_size, released; 5200 int batch_size, released;
5038 5201
5039 released = release_stripe_list(conf); 5202 released = release_stripe_list(conf, conf->temp_inactive_list);
5040 5203
5041 if ( 5204 if (
5042 !list_empty(&conf->bitmap_list)) { 5205 !list_empty(&conf->bitmap_list)) {
@@ -5046,7 +5209,7 @@ static void raid5d(struct md_thread *thread)
5046 bitmap_unplug(mddev->bitmap); 5209 bitmap_unplug(mddev->bitmap);
5047 spin_lock_irq(&conf->device_lock); 5210 spin_lock_irq(&conf->device_lock);
5048 conf->seq_write = conf->seq_flush; 5211 conf->seq_write = conf->seq_flush;
5049 activate_bit_delay(conf); 5212 activate_bit_delay(conf, conf->temp_inactive_list);
5050 } 5213 }
5051 raid5_activate_delayed(conf); 5214 raid5_activate_delayed(conf);
5052 5215
@@ -5060,7 +5223,8 @@ static void raid5d(struct md_thread *thread)
5060 handled++; 5223 handled++;
5061 } 5224 }
5062 5225
5063 batch_size = handle_active_stripes(conf, ANY_GROUP, NULL); 5226 batch_size = handle_active_stripes(conf, ANY_GROUP, NULL,
5227 conf->temp_inactive_list);
5064 if (!batch_size && !released) 5228 if (!batch_size && !released)
5065 break; 5229 break;
5066 handled += batch_size; 5230 handled += batch_size;
@@ -5096,22 +5260,29 @@ raid5_set_cache_size(struct mddev *mddev, int size)
5096{ 5260{
5097 struct r5conf *conf = mddev->private; 5261 struct r5conf *conf = mddev->private;
5098 int err; 5262 int err;
5263 int hash;
5099 5264
5100 if (size <= 16 || size > 32768) 5265 if (size <= 16 || size > 32768)
5101 return -EINVAL; 5266 return -EINVAL;
5267 hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
5102 while (size < conf->max_nr_stripes) { 5268 while (size < conf->max_nr_stripes) {
5103 if (drop_one_stripe(conf)) 5269 if (drop_one_stripe(conf, hash))
5104 conf->max_nr_stripes--; 5270 conf->max_nr_stripes--;
5105 else 5271 else
5106 break; 5272 break;
5273 hash--;
5274 if (hash < 0)
5275 hash = NR_STRIPE_HASH_LOCKS - 1;
5107 } 5276 }
5108 err = md_allow_write(mddev); 5277 err = md_allow_write(mddev);
5109 if (err) 5278 if (err)
5110 return err; 5279 return err;
5280 hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
5111 while (size > conf->max_nr_stripes) { 5281 while (size > conf->max_nr_stripes) {
5112 if (grow_one_stripe(conf)) 5282 if (grow_one_stripe(conf, hash))
5113 conf->max_nr_stripes++; 5283 conf->max_nr_stripes++;
5114 else break; 5284 else break;
5285 hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
5115 } 5286 }
5116 return 0; 5287 return 0;
5117} 5288}
@@ -5199,15 +5370,18 @@ raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
5199 return 0; 5370 return 0;
5200} 5371}
5201 5372
5202static int alloc_thread_groups(struct r5conf *conf, int cnt); 5373static int alloc_thread_groups(struct r5conf *conf, int cnt,
5374 int *group_cnt,
5375 int *worker_cnt_per_group,
5376 struct r5worker_group **worker_groups);
5203static ssize_t 5377static ssize_t
5204raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) 5378raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
5205{ 5379{
5206 struct r5conf *conf = mddev->private; 5380 struct r5conf *conf = mddev->private;
5207 unsigned long new; 5381 unsigned long new;
5208 int err; 5382 int err;
5209 struct r5worker_group *old_groups; 5383 struct r5worker_group *new_groups, *old_groups;
5210 int old_group_cnt; 5384 int group_cnt, worker_cnt_per_group;
5211 5385
5212 if (len >= PAGE_SIZE) 5386 if (len >= PAGE_SIZE)
5213 return -EINVAL; 5387 return -EINVAL;
@@ -5223,14 +5397,19 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
5223 mddev_suspend(mddev); 5397 mddev_suspend(mddev);
5224 5398
5225 old_groups = conf->worker_groups; 5399 old_groups = conf->worker_groups;
5226 old_group_cnt = conf->worker_cnt_per_group; 5400 if (old_groups)
5401 flush_workqueue(raid5_wq);
5402
5403 err = alloc_thread_groups(conf, new,
5404 &group_cnt, &worker_cnt_per_group,
5405 &new_groups);
5406 if (!err) {
5407 spin_lock_irq(&conf->device_lock);
5408 conf->group_cnt = group_cnt;
5409 conf->worker_cnt_per_group = worker_cnt_per_group;
5410 conf->worker_groups = new_groups;
5411 spin_unlock_irq(&conf->device_lock);
5227 5412
5228 conf->worker_groups = NULL;
5229 err = alloc_thread_groups(conf, new);
5230 if (err) {
5231 conf->worker_groups = old_groups;
5232 conf->worker_cnt_per_group = old_group_cnt;
5233 } else {
5234 if (old_groups) 5413 if (old_groups)
5235 kfree(old_groups[0].workers); 5414 kfree(old_groups[0].workers);
5236 kfree(old_groups); 5415 kfree(old_groups);
@@ -5260,40 +5439,47 @@ static struct attribute_group raid5_attrs_group = {
5260 .attrs = raid5_attrs, 5439 .attrs = raid5_attrs,
5261}; 5440};
5262 5441
5263static int alloc_thread_groups(struct r5conf *conf, int cnt) 5442static int alloc_thread_groups(struct r5conf *conf, int cnt,
5443 int *group_cnt,
5444 int *worker_cnt_per_group,
5445 struct r5worker_group **worker_groups)
5264{ 5446{
5265 int i, j; 5447 int i, j, k;
5266 ssize_t size; 5448 ssize_t size;
5267 struct r5worker *workers; 5449 struct r5worker *workers;
5268 5450
5269 conf->worker_cnt_per_group = cnt; 5451 *worker_cnt_per_group = cnt;
5270 if (cnt == 0) { 5452 if (cnt == 0) {
5271 conf->worker_groups = NULL; 5453 *group_cnt = 0;
5454 *worker_groups = NULL;
5272 return 0; 5455 return 0;
5273 } 5456 }
5274 conf->group_cnt = num_possible_nodes(); 5457 *group_cnt = num_possible_nodes();
5275 size = sizeof(struct r5worker) * cnt; 5458 size = sizeof(struct r5worker) * cnt;
5276 workers = kzalloc(size * conf->group_cnt, GFP_NOIO); 5459 workers = kzalloc(size * *group_cnt, GFP_NOIO);
5277 conf->worker_groups = kzalloc(sizeof(struct r5worker_group) * 5460 *worker_groups = kzalloc(sizeof(struct r5worker_group) *
5278 conf->group_cnt, GFP_NOIO); 5461 *group_cnt, GFP_NOIO);
5279 if (!conf->worker_groups || !workers) { 5462 if (!*worker_groups || !workers) {
5280 kfree(workers); 5463 kfree(workers);
5281 kfree(conf->worker_groups); 5464 kfree(*worker_groups);
5282 conf->worker_groups = NULL;
5283 return -ENOMEM; 5465 return -ENOMEM;
5284 } 5466 }
5285 5467
5286 for (i = 0; i < conf->group_cnt; i++) { 5468 for (i = 0; i < *group_cnt; i++) {
5287 struct r5worker_group *group; 5469 struct r5worker_group *group;
5288 5470
5289 group = &conf->worker_groups[i]; 5471 group = &(*worker_groups)[i];
5290 INIT_LIST_HEAD(&group->handle_list); 5472 INIT_LIST_HEAD(&group->handle_list);
5291 group->conf = conf; 5473 group->conf = conf;
5292 group->workers = workers + i * cnt; 5474 group->workers = workers + i * cnt;
5293 5475
5294 for (j = 0; j < cnt; j++) { 5476 for (j = 0; j < cnt; j++) {
5295 group->workers[j].group = group; 5477 struct r5worker *worker = group->workers + j;
5296 INIT_WORK(&group->workers[j].work, raid5_do_work); 5478 worker->group = group;
5479 INIT_WORK(&worker->work, raid5_do_work);
5480
5481 for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++)
5482 INIT_LIST_HEAD(worker->temp_inactive_list + k);
5297 } 5483 }
5298 } 5484 }
5299 5485
@@ -5444,6 +5630,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)
5444 struct md_rdev *rdev; 5630 struct md_rdev *rdev;
5445 struct disk_info *disk; 5631 struct disk_info *disk;
5446 char pers_name[6]; 5632 char pers_name[6];
5633 int i;
5634 int group_cnt, worker_cnt_per_group;
5635 struct r5worker_group *new_group;
5447 5636
5448 if (mddev->new_level != 5 5637 if (mddev->new_level != 5
5449 && mddev->new_level != 4 5638 && mddev->new_level != 4
@@ -5478,7 +5667,12 @@ static struct r5conf *setup_conf(struct mddev *mddev)
5478 if (conf == NULL) 5667 if (conf == NULL)
5479 goto abort; 5668 goto abort;
5480 /* Don't enable multi-threading by default*/ 5669 /* Don't enable multi-threading by default*/
5481 if (alloc_thread_groups(conf, 0)) 5670 if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group,
5671 &new_group)) {
5672 conf->group_cnt = group_cnt;
5673 conf->worker_cnt_per_group = worker_cnt_per_group;
5674 conf->worker_groups = new_group;
5675 } else
5482 goto abort; 5676 goto abort;
5483 spin_lock_init(&conf->device_lock); 5677 spin_lock_init(&conf->device_lock);
5484 seqcount_init(&conf->gen_lock); 5678 seqcount_init(&conf->gen_lock);
@@ -5488,7 +5682,6 @@ static struct r5conf *setup_conf(struct mddev *mddev)
5488 INIT_LIST_HEAD(&conf->hold_list); 5682 INIT_LIST_HEAD(&conf->hold_list);
5489 INIT_LIST_HEAD(&conf->delayed_list); 5683 INIT_LIST_HEAD(&conf->delayed_list);
5490 INIT_LIST_HEAD(&conf->bitmap_list); 5684 INIT_LIST_HEAD(&conf->bitmap_list);
5491 INIT_LIST_HEAD(&conf->inactive_list);
5492 init_llist_head(&conf->released_stripes); 5685 init_llist_head(&conf->released_stripes);
5493 atomic_set(&conf->active_stripes, 0); 5686 atomic_set(&conf->active_stripes, 0);
5494 atomic_set(&conf->preread_active_stripes, 0); 5687 atomic_set(&conf->preread_active_stripes, 0);
@@ -5514,6 +5707,21 @@ static struct r5conf *setup_conf(struct mddev *mddev)
5514 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 5707 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
5515 goto abort; 5708 goto abort;
5516 5709
5710 /* We init hash_locks[0] separately to that it can be used
5711 * as the reference lock in the spin_lock_nest_lock() call
5712 * in lock_all_device_hash_locks_irq in order to convince
5713 * lockdep that we know what we are doing.
5714 */
5715 spin_lock_init(conf->hash_locks);
5716 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
5717 spin_lock_init(conf->hash_locks + i);
5718
5719 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
5720 INIT_LIST_HEAD(conf->inactive_list + i);
5721
5722 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
5723 INIT_LIST_HEAD(conf->temp_inactive_list + i);
5724
5517 conf->level = mddev->new_level; 5725 conf->level = mddev->new_level;
5518 if (raid5_alloc_percpu(conf) != 0) 5726 if (raid5_alloc_percpu(conf) != 0)
5519 goto abort; 5727 goto abort;
@@ -5554,7 +5762,6 @@ static struct r5conf *setup_conf(struct mddev *mddev)
5554 else 5762 else
5555 conf->max_degraded = 1; 5763 conf->max_degraded = 1;
5556 conf->algorithm = mddev->new_layout; 5764 conf->algorithm = mddev->new_layout;
5557 conf->max_nr_stripes = NR_STRIPES;
5558 conf->reshape_progress = mddev->reshape_position; 5765 conf->reshape_progress = mddev->reshape_position;
5559 if (conf->reshape_progress != MaxSector) { 5766 if (conf->reshape_progress != MaxSector) {
5560 conf->prev_chunk_sectors = mddev->chunk_sectors; 5767 conf->prev_chunk_sectors = mddev->chunk_sectors;
@@ -5563,7 +5770,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
5563 5770
5564 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + 5771 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
5565 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 5772 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
5566 if (grow_stripes(conf, conf->max_nr_stripes)) { 5773 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
5774 if (grow_stripes(conf, NR_STRIPES)) {
5567 printk(KERN_ERR 5775 printk(KERN_ERR
5568 "md/raid:%s: couldn't allocate %dkB for buffers\n", 5776 "md/raid:%s: couldn't allocate %dkB for buffers\n",
5569 mdname(mddev), memory); 5777 mdname(mddev), memory);
@@ -6369,12 +6577,18 @@ static int raid5_start_reshape(struct mddev *mddev)
6369 if (!mddev->sync_thread) { 6577 if (!mddev->sync_thread) {
6370 mddev->recovery = 0; 6578 mddev->recovery = 0;
6371 spin_lock_irq(&conf->device_lock); 6579 spin_lock_irq(&conf->device_lock);
6580 write_seqcount_begin(&conf->gen_lock);
6372 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 6581 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
6582 mddev->new_chunk_sectors =
6583 conf->chunk_sectors = conf->prev_chunk_sectors;
6584 mddev->new_layout = conf->algorithm = conf->prev_algo;
6373 rdev_for_each(rdev, mddev) 6585 rdev_for_each(rdev, mddev)
6374 rdev->new_data_offset = rdev->data_offset; 6586 rdev->new_data_offset = rdev->data_offset;
6375 smp_wmb(); 6587 smp_wmb();
6588 conf->generation --;
6376 conf->reshape_progress = MaxSector; 6589 conf->reshape_progress = MaxSector;
6377 mddev->reshape_position = MaxSector; 6590 mddev->reshape_position = MaxSector;
6591 write_seqcount_end(&conf->gen_lock);
6378 spin_unlock_irq(&conf->device_lock); 6592 spin_unlock_irq(&conf->device_lock);
6379 return -EAGAIN; 6593 return -EAGAIN;
6380 } 6594 }
@@ -6462,27 +6676,28 @@ static void raid5_quiesce(struct mddev *mddev, int state)
6462 break; 6676 break;
6463 6677
6464 case 1: /* stop all writes */ 6678 case 1: /* stop all writes */
6465 spin_lock_irq(&conf->device_lock); 6679 lock_all_device_hash_locks_irq(conf);
6466 /* '2' tells resync/reshape to pause so that all 6680 /* '2' tells resync/reshape to pause so that all
6467 * active stripes can drain 6681 * active stripes can drain
6468 */ 6682 */
6469 conf->quiesce = 2; 6683 conf->quiesce = 2;
6470 wait_event_lock_irq(conf->wait_for_stripe, 6684 wait_event_cmd(conf->wait_for_stripe,
6471 atomic_read(&conf->active_stripes) == 0 && 6685 atomic_read(&conf->active_stripes) == 0 &&
6472 atomic_read(&conf->active_aligned_reads) == 0, 6686 atomic_read(&conf->active_aligned_reads) == 0,
6473 conf->device_lock); 6687 unlock_all_device_hash_locks_irq(conf),
6688 lock_all_device_hash_locks_irq(conf));
6474 conf->quiesce = 1; 6689 conf->quiesce = 1;
6475 spin_unlock_irq(&conf->device_lock); 6690 unlock_all_device_hash_locks_irq(conf);
6476 /* allow reshape to continue */ 6691 /* allow reshape to continue */
6477 wake_up(&conf->wait_for_overlap); 6692 wake_up(&conf->wait_for_overlap);
6478 break; 6693 break;
6479 6694
6480 case 0: /* re-enable writes */ 6695 case 0: /* re-enable writes */
6481 spin_lock_irq(&conf->device_lock); 6696 lock_all_device_hash_locks_irq(conf);
6482 conf->quiesce = 0; 6697 conf->quiesce = 0;
6483 wake_up(&conf->wait_for_stripe); 6698 wake_up(&conf->wait_for_stripe);
6484 wake_up(&conf->wait_for_overlap); 6699 wake_up(&conf->wait_for_overlap);
6485 spin_unlock_irq(&conf->device_lock); 6700 unlock_all_device_hash_locks_irq(conf);
6486 break; 6701 break;
6487 } 6702 }
6488} 6703}
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index b42e6b462eda..01ad8ae8f578 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -205,6 +205,7 @@ struct stripe_head {
205 short pd_idx; /* parity disk index */ 205 short pd_idx; /* parity disk index */
206 short qd_idx; /* 'Q' disk index for raid6 */ 206 short qd_idx; /* 'Q' disk index for raid6 */
207 short ddf_layout;/* use DDF ordering to calculate Q */ 207 short ddf_layout;/* use DDF ordering to calculate Q */
208 short hash_lock_index;
208 unsigned long state; /* state flags */ 209 unsigned long state; /* state flags */
209 atomic_t count; /* nr of active thread/requests */ 210 atomic_t count; /* nr of active thread/requests */
210 int bm_seq; /* sequence number for bitmap flushes */ 211 int bm_seq; /* sequence number for bitmap flushes */
@@ -367,9 +368,18 @@ struct disk_info {
367 struct md_rdev *rdev, *replacement; 368 struct md_rdev *rdev, *replacement;
368}; 369};
369 370
371/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
372 * This is because we sometimes take all the spinlocks
373 * and creating that much locking depth can cause
374 * problems.
375 */
376#define NR_STRIPE_HASH_LOCKS 8
377#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)
378
370struct r5worker { 379struct r5worker {
371 struct work_struct work; 380 struct work_struct work;
372 struct r5worker_group *group; 381 struct r5worker_group *group;
382 struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
373 bool working; 383 bool working;
374}; 384};
375 385
@@ -382,6 +392,8 @@ struct r5worker_group {
382 392
383struct r5conf { 393struct r5conf {
384 struct hlist_head *stripe_hashtbl; 394 struct hlist_head *stripe_hashtbl;
395 /* only protect corresponding hash list and inactive_list */
396 spinlock_t hash_locks[NR_STRIPE_HASH_LOCKS];
385 struct mddev *mddev; 397 struct mddev *mddev;
386 int chunk_sectors; 398 int chunk_sectors;
387 int level, algorithm; 399 int level, algorithm;
@@ -462,7 +474,8 @@ struct r5conf {
462 * Free stripes pool 474 * Free stripes pool
463 */ 475 */
464 atomic_t active_stripes; 476 atomic_t active_stripes;
465 struct list_head inactive_list; 477 struct list_head inactive_list[NR_STRIPE_HASH_LOCKS];
478 atomic_t empty_inactive_list_nr;
466 struct llist_head released_stripes; 479 struct llist_head released_stripes;
467 wait_queue_head_t wait_for_stripe; 480 wait_queue_head_t wait_for_stripe;
468 wait_queue_head_t wait_for_overlap; 481 wait_queue_head_t wait_for_overlap;
@@ -477,6 +490,7 @@ struct r5conf {
477 * the new thread here until we fully activate the array. 490 * the new thread here until we fully activate the array.
478 */ 491 */
479 struct md_thread *thread; 492 struct md_thread *thread;
493 struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
480 struct r5worker_group *worker_groups; 494 struct r5worker_group *worker_groups;
481 int group_cnt; 495 int group_cnt;
482 int worker_cnt_per_group; 496 int worker_cnt_per_group;
diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h
index d0799e323364..9c9063cd3208 100644
--- a/drivers/media/common/siano/smscoreapi.h
+++ b/drivers/media/common/siano/smscoreapi.h
@@ -955,7 +955,7 @@ struct sms_rx_stats {
955 u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */ 955 u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */
956 s32 SNR; /* dB */ 956 s32 SNR; /* dB */
957 u32 ber; /* Post Viterbi ber [1E-5] */ 957 u32 ber; /* Post Viterbi ber [1E-5] */
958 u32 ber_error_count; /* Number of erronous SYNC bits. */ 958 u32 ber_error_count; /* Number of erroneous SYNC bits. */
959 u32 ber_bit_count; /* Total number of SYNC bits. */ 959 u32 ber_bit_count; /* Total number of SYNC bits. */
960 u32 ts_per; /* Transport stream PER, 960 u32 ts_per; /* Transport stream PER,
961 0xFFFFFFFF indicate N/A */ 961 0xFFFFFFFF indicate N/A */
@@ -981,7 +981,7 @@ struct sms_rx_stats_ex {
981 u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */ 981 u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */
982 s32 SNR; /* dB */ 982 s32 SNR; /* dB */
983 u32 ber; /* Post Viterbi ber [1E-5] */ 983 u32 ber; /* Post Viterbi ber [1E-5] */
984 u32 ber_error_count; /* Number of erronous SYNC bits. */ 984 u32 ber_error_count; /* Number of erroneous SYNC bits. */
985 u32 ber_bit_count; /* Total number of SYNC bits. */ 985 u32 ber_bit_count; /* Total number of SYNC bits. */
986 u32 ts_per; /* Transport stream PER, 986 u32 ts_per; /* Transport stream PER,
987 0xFFFFFFFF indicate N/A */ 987 0xFFFFFFFF indicate N/A */
diff --git a/drivers/media/common/siano/smsdvb.h b/drivers/media/common/siano/smsdvb.h
index 92c413ba0c79..ae36d0ae0fb1 100644
--- a/drivers/media/common/siano/smsdvb.h
+++ b/drivers/media/common/siano/smsdvb.h
@@ -95,7 +95,7 @@ struct RECEPTION_STATISTICS_PER_SLICES_S {
95 u32 is_demod_locked; /* 0 - not locked, 1 - locked */ 95 u32 is_demod_locked; /* 0 - not locked, 1 - locked */
96 96
97 u32 ber_bit_count; /* Total number of SYNC bits. */ 97 u32 ber_bit_count; /* Total number of SYNC bits. */
98 u32 ber_error_count; /* Number of erronous SYNC bits. */ 98 u32 ber_error_count; /* Number of erroneous SYNC bits. */
99 99
100 s32 MRC_SNR; /* dB */ 100 s32 MRC_SNR; /* dB */
101 s32 mrc_in_band_pwr; /* In band power in dBM */ 101 s32 mrc_in_band_pwr; /* In band power in dBM */
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index 58de4410c525..6c7ff0cdcd32 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -435,7 +435,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
435 dprintk_tscheck("TEI detected. " 435 dprintk_tscheck("TEI detected. "
436 "PID=0x%x data1=0x%x\n", 436 "PID=0x%x data1=0x%x\n",
437 pid, buf[1]); 437 pid, buf[1]);
438 /* data in this packet cant be trusted - drop it unless 438 /* data in this packet can't be trusted - drop it unless
439 * module option dvb_demux_feed_err_pkts is set */ 439 * module option dvb_demux_feed_err_pkts is set */
440 if (!dvb_demux_feed_err_pkts) 440 if (!dvb_demux_feed_err_pkts)
441 return; 441 return;
@@ -1032,8 +1032,13 @@ static int dmx_section_feed_release_filter(struct dmx_section_feed *feed,
1032 return -EINVAL; 1032 return -EINVAL;
1033 } 1033 }
1034 1034
1035 if (feed->is_filtering) 1035 if (feed->is_filtering) {
1036 /* release dvbdmx->mutex as far as it is
1037 acquired by stop_filtering() itself */
1038 mutex_unlock(&dvbdmx->mutex);
1036 feed->stop_filtering(feed); 1039 feed->stop_filtering(feed);
1040 mutex_lock(&dvbdmx->mutex);
1041 }
1037 1042
1038 spin_lock_irq(&dvbdmx->lock); 1043 spin_lock_irq(&dvbdmx->lock);
1039 f = dvbdmxfeed->filter; 1044 f = dvbdmxfeed->filter;
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 30ee59052157..65728c25ea05 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -170,18 +170,18 @@ static int af9033_rd_reg_mask(struct af9033_state *state, u32 reg, u8 *val,
170static int af9033_wr_reg_val_tab(struct af9033_state *state, 170static int af9033_wr_reg_val_tab(struct af9033_state *state,
171 const struct reg_val *tab, int tab_len) 171 const struct reg_val *tab, int tab_len)
172{ 172{
173#define MAX_TAB_LEN 212
173 int ret, i, j; 174 int ret, i, j;
174 u8 buf[MAX_XFER_SIZE]; 175 u8 buf[1 + MAX_TAB_LEN];
176
177 dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
175 178
176 if (tab_len > sizeof(buf)) { 179 if (tab_len > sizeof(buf)) {
177 dev_warn(&state->i2c->dev, 180 dev_warn(&state->i2c->dev, "%s: tab len %d is too big\n",
178 "%s: i2c wr len=%d is too big!\n", 181 KBUILD_MODNAME, tab_len);
179 KBUILD_MODNAME, tab_len);
180 return -EINVAL; 182 return -EINVAL;
181 } 183 }
182 184
183 dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
184
185 for (i = 0, j = 0; i < tab_len; i++) { 185 for (i = 0, j = 0; i < tab_len; i++) {
186 buf[j] = tab[i].val; 186 buf[j] = tab[i].val;
187 187
diff --git a/drivers/media/dvb-frontends/cxd2820r_c.c b/drivers/media/dvb-frontends/cxd2820r_c.c
index 125a44041011..5c6ab4921bf1 100644
--- a/drivers/media/dvb-frontends/cxd2820r_c.c
+++ b/drivers/media/dvb-frontends/cxd2820r_c.c
@@ -78,7 +78,7 @@ int cxd2820r_set_frontend_c(struct dvb_frontend *fe)
78 78
79 num = if_freq / 1000; /* Hz => kHz */ 79 num = if_freq / 1000; /* Hz => kHz */
80 num *= 0x4000; 80 num *= 0x4000;
81 if_ctl = cxd2820r_div_u64_round_closest(num, 41000); 81 if_ctl = 0x4000 - cxd2820r_div_u64_round_closest(num, 41000);
82 buf[0] = (if_ctl >> 8) & 0x3f; 82 buf[0] = (if_ctl >> 8) & 0x3f;
83 buf[1] = (if_ctl >> 0) & 0xff; 83 buf[1] = (if_ctl >> 0) & 0xff;
84 84
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index 90536147bf04..6dbbee453ee1 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -3048,7 +3048,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
3048 dib8000_set_diversity_in(state->fe[0], state->diversity_onoff); 3048 dib8000_set_diversity_in(state->fe[0], state->diversity_onoff);
3049 3049
3050 locks = (dib8000_read_word(state, 180) >> 6) & 0x3f; /* P_coff_winlen ? */ 3050 locks = (dib8000_read_word(state, 180) >> 6) & 0x3f; /* P_coff_winlen ? */
3051 /* coff should lock over P_coff_winlen ofdm symbols : give 3 times this lenght to lock */ 3051 /* coff should lock over P_coff_winlen ofdm symbols : give 3 times this length to lock */
3052 *timeout = dib8000_get_timeout(state, 2 * locks, SYMBOL_DEPENDENT_ON); 3052 *timeout = dib8000_get_timeout(state, 2 * locks, SYMBOL_DEPENDENT_ON);
3053 *tune_state = CT_DEMOD_STEP_5; 3053 *tune_state = CT_DEMOD_STEP_5;
3054 break; 3054 break;
@@ -3115,7 +3115,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
3115 3115
3116 case CT_DEMOD_STEP_9: /* 39 */ 3116 case CT_DEMOD_STEP_9: /* 39 */
3117 if ((state->revision == 0x8090) || ((dib8000_read_word(state, 1291) >> 9) & 0x1)) { /* fe capable of deinterleaving : esram */ 3117 if ((state->revision == 0x8090) || ((dib8000_read_word(state, 1291) >> 9) & 0x1)) { /* fe capable of deinterleaving : esram */
3118 /* defines timeout for mpeg lock depending on interleaver lenght of longest layer */ 3118 /* defines timeout for mpeg lock depending on interleaver length of longest layer */
3119 for (i = 0; i < 3; i++) { 3119 for (i = 0; i < 3; i++) {
3120 if (c->layer[i].interleaving >= deeper_interleaver) { 3120 if (c->layer[i].interleaving >= deeper_interleaver) {
3121 dprintk("layer%i: time interleaver = %d ", i, c->layer[i].interleaving); 3121 dprintk("layer%i: time interleaver = %d ", i, c->layer[i].interleaving);
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index d416c15691da..bf29a3f0e6f0 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -1191,7 +1191,7 @@ static int mpegts_configure_pins(struct drxk_state *state, bool mpeg_enable)
1191 goto error; 1191 goto error;
1192 1192
1193 if (state->m_enable_parallel == true) { 1193 if (state->m_enable_parallel == true) {
1194 /* paralel -> enable MD1 to MD7 */ 1194 /* parallel -> enable MD1 to MD7 */
1195 status = write16(state, SIO_PDR_MD1_CFG__A, 1195 status = write16(state, SIO_PDR_MD1_CFG__A,
1196 sio_pdr_mdx_cfg); 1196 sio_pdr_mdx_cfg);
1197 if (status < 0) 1197 if (status < 0)
@@ -1428,7 +1428,7 @@ static int mpegts_stop(struct drxk_state *state)
1428 1428
1429 dprintk(1, "\n"); 1429 dprintk(1, "\n");
1430 1430
1431 /* Gracefull shutdown (byte boundaries) */ 1431 /* Graceful shutdown (byte boundaries) */
1432 status = read16(state, FEC_OC_SNC_MODE__A, &fec_oc_snc_mode); 1432 status = read16(state, FEC_OC_SNC_MODE__A, &fec_oc_snc_mode);
1433 if (status < 0) 1433 if (status < 0)
1434 goto error; 1434 goto error;
@@ -2021,7 +2021,7 @@ static int mpegts_dto_setup(struct drxk_state *state,
2021 fec_oc_dto_burst_len = 204; 2021 fec_oc_dto_burst_len = 204;
2022 } 2022 }
2023 2023
2024 /* Check serial or parrallel output */ 2024 /* Check serial or parallel output */
2025 fec_oc_reg_ipr_mode &= (~(FEC_OC_IPR_MODE_SERIAL__M)); 2025 fec_oc_reg_ipr_mode &= (~(FEC_OC_IPR_MODE_SERIAL__M));
2026 if (state->m_enable_parallel == false) { 2026 if (state->m_enable_parallel == false) {
2027 /* MPEG data output is serial -> set ipr_mode[0] */ 2027 /* MPEG data output is serial -> set ipr_mode[0] */
@@ -2908,7 +2908,7 @@ static int adc_synchronization(struct drxk_state *state)
2908 goto error; 2908 goto error;
2909 2909
2910 if (count == 1) { 2910 if (count == 1) {
2911 /* Try sampling on a diffrent edge */ 2911 /* Try sampling on a different edge */
2912 u16 clk_neg = 0; 2912 u16 clk_neg = 0;
2913 2913
2914 status = read16(state, IQM_AF_CLKNEG__A, &clk_neg); 2914 status = read16(state, IQM_AF_CLKNEG__A, &clk_neg);
@@ -3306,7 +3306,7 @@ static int dvbt_sc_command(struct drxk_state *state,
3306 if (status < 0) 3306 if (status < 0)
3307 goto error; 3307 goto error;
3308 3308
3309 /* Retreive results parameters from SC */ 3309 /* Retrieve results parameters from SC */
3310 switch (cmd) { 3310 switch (cmd) {
3311 /* All commands yielding 5 results */ 3311 /* All commands yielding 5 results */
3312 /* All commands yielding 4 results */ 3312 /* All commands yielding 4 results */
@@ -3849,7 +3849,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
3849 break; 3849 break;
3850 } 3850 }
3851#if 0 3851#if 0
3852 /* No hierachical channels support in BDA */ 3852 /* No hierarchical channels support in BDA */
3853 /* Priority (only for hierarchical channels) */ 3853 /* Priority (only for hierarchical channels) */
3854 switch (channel->priority) { 3854 switch (channel->priority) {
3855 case DRX_PRIORITY_LOW: 3855 case DRX_PRIORITY_LOW:
@@ -4081,7 +4081,7 @@ error:
4081/*============================================================================*/ 4081/*============================================================================*/
4082 4082
4083/** 4083/**
4084* \brief Retreive lock status . 4084* \brief Retrieve lock status .
4085* \param demod Pointer to demodulator instance. 4085* \param demod Pointer to demodulator instance.
4086* \param lockStat Pointer to lock status structure. 4086* \param lockStat Pointer to lock status structure.
4087* \return DRXStatus_t. 4087* \return DRXStatus_t.
@@ -6174,7 +6174,7 @@ static int init_drxk(struct drxk_state *state)
6174 goto error; 6174 goto error;
6175 6175
6176 /* Stamp driver version number in SCU data RAM in BCD code 6176 /* Stamp driver version number in SCU data RAM in BCD code
6177 Done to enable field application engineers to retreive drxdriver version 6177 Done to enable field application engineers to retrieve drxdriver version
6178 via I2C from SCU RAM. 6178 via I2C from SCU RAM.
6179 Not using SCU command interface for SCU register access since no 6179 Not using SCU command interface for SCU register access since no
6180 microcode may be present. 6180 microcode may be present.
@@ -6399,7 +6399,7 @@ static int drxk_set_parameters(struct dvb_frontend *fe)
6399 fe->ops.tuner_ops.get_if_frequency(fe, &IF); 6399 fe->ops.tuner_ops.get_if_frequency(fe, &IF);
6400 start(state, 0, IF); 6400 start(state, 0, IF);
6401 6401
6402 /* After set_frontend, stats aren't avaliable */ 6402 /* After set_frontend, stats aren't available */
6403 p->strength.stat[0].scale = FE_SCALE_RELATIVE; 6403 p->strength.stat[0].scale = FE_SCALE_RELATIVE;
6404 p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; 6404 p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
6405 p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; 6405 p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index 7efb796c472c..50e8b63e5169 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -710,6 +710,7 @@ struct dvb_frontend *rtl2830_attach(const struct rtl2830_config *cfg,
710 sizeof(priv->tuner_i2c_adapter.name)); 710 sizeof(priv->tuner_i2c_adapter.name));
711 priv->tuner_i2c_adapter.algo = &rtl2830_tuner_i2c_algo; 711 priv->tuner_i2c_adapter.algo = &rtl2830_tuner_i2c_algo;
712 priv->tuner_i2c_adapter.algo_data = NULL; 712 priv->tuner_i2c_adapter.algo_data = NULL;
713 priv->tuner_i2c_adapter.dev.parent = &i2c->dev;
713 i2c_set_adapdata(&priv->tuner_i2c_adapter, priv); 714 i2c_set_adapdata(&priv->tuner_i2c_adapter, priv);
714 if (i2c_add_adapter(&priv->tuner_i2c_adapter) < 0) { 715 if (i2c_add_adapter(&priv->tuner_i2c_adapter) < 0) {
715 dev_err(&i2c->dev, 716 dev_err(&i2c->dev,
diff --git a/drivers/media/i2c/adv7183_regs.h b/drivers/media/i2c/adv7183_regs.h
index 4a5b7d211d2f..b253d400e817 100644
--- a/drivers/media/i2c/adv7183_regs.h
+++ b/drivers/media/i2c/adv7183_regs.h
@@ -52,9 +52,9 @@
52#define ADV7183_VS_FIELD_CTRL_1 0x31 /* Vsync field control 1 */ 52#define ADV7183_VS_FIELD_CTRL_1 0x31 /* Vsync field control 1 */
53#define ADV7183_VS_FIELD_CTRL_2 0x32 /* Vsync field control 2 */ 53#define ADV7183_VS_FIELD_CTRL_2 0x32 /* Vsync field control 2 */
54#define ADV7183_VS_FIELD_CTRL_3 0x33 /* Vsync field control 3 */ 54#define ADV7183_VS_FIELD_CTRL_3 0x33 /* Vsync field control 3 */
55#define ADV7183_HS_POS_CTRL_1 0x34 /* Hsync positon control 1 */ 55#define ADV7183_HS_POS_CTRL_1 0x34 /* Hsync position control 1 */
56#define ADV7183_HS_POS_CTRL_2 0x35 /* Hsync positon control 2 */ 56#define ADV7183_HS_POS_CTRL_2 0x35 /* Hsync position control 2 */
57#define ADV7183_HS_POS_CTRL_3 0x36 /* Hsync positon control 3 */ 57#define ADV7183_HS_POS_CTRL_3 0x36 /* Hsync position control 3 */
58#define ADV7183_POLARITY 0x37 /* Polarity */ 58#define ADV7183_POLARITY 0x37 /* Polarity */
59#define ADV7183_NTSC_COMB_CTRL 0x38 /* NTSC comb control */ 59#define ADV7183_NTSC_COMB_CTRL 0x38 /* NTSC comb control */
60#define ADV7183_PAL_COMB_CTRL 0x39 /* PAL comb control */ 60#define ADV7183_PAL_COMB_CTRL 0x39 /* PAL comb control */
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index fbfdd2fc2a36..a324106b9f11 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -877,7 +877,7 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd,
877 break; 877 break;
878 case ADV7604_MODE_HDMI: 878 case ADV7604_MODE_HDMI:
879 /* set default prim_mode/vid_std for HDMI 879 /* set default prim_mode/vid_std for HDMI
880 accoring to [REF_03, c. 4.2] */ 880 according to [REF_03, c. 4.2] */
881 io_write(sd, 0x00, 0x02); /* video std */ 881 io_write(sd, 0x00, 0x02); /* video std */
882 io_write(sd, 0x01, 0x06); /* prim mode */ 882 io_write(sd, 0x01, 0x06); /* prim mode */
883 break; 883 break;
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 22f729d66a96..b154f36740b4 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -1013,7 +1013,7 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd,
1013 break; 1013 break;
1014 case ADV7842_MODE_HDMI: 1014 case ADV7842_MODE_HDMI:
1015 /* set default prim_mode/vid_std for HDMI 1015 /* set default prim_mode/vid_std for HDMI
1016 accoring to [REF_03, c. 4.2] */ 1016 according to [REF_03, c. 4.2] */
1017 io_write(sd, 0x00, 0x02); /* video std */ 1017 io_write(sd, 0x00, 0x02); /* video std */
1018 io_write(sd, 0x01, 0x06); /* prim mode */ 1018 io_write(sd, 0x01, 0x06); /* prim mode */
1019 break; 1019 break;
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 82bf5679da30..99ee456700f4 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -394,7 +394,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
394 394
395 if (!rc) { 395 if (!rc) {
396 /* 396 /*
397 * If platform_data doesn't specify rc_dev, initilize it 397 * If platform_data doesn't specify rc_dev, initialize it
398 * internally 398 * internally
399 */ 399 */
400 rc = rc_allocate_device(); 400 rc = rc_allocate_device();
diff --git a/drivers/media/i2c/m5mols/m5mols_controls.c b/drivers/media/i2c/m5mols/m5mols_controls.c
index f34429e452ab..a60931e66312 100644
--- a/drivers/media/i2c/m5mols/m5mols_controls.c
+++ b/drivers/media/i2c/m5mols/m5mols_controls.c
@@ -544,7 +544,7 @@ int m5mols_init_controls(struct v4l2_subdev *sd)
544 u16 zoom_step; 544 u16 zoom_step;
545 int ret; 545 int ret;
546 546
547 /* Determine the firmware dependant control range and step values */ 547 /* Determine the firmware dependent control range and step values */
548 ret = m5mols_read_u16(sd, AE_MAX_GAIN_MON, &exposure_max); 548 ret = m5mols_read_u16(sd, AE_MAX_GAIN_MON, &exposure_max);
549 if (ret < 0) 549 if (ret < 0)
550 return ret; 550 return ret;
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 4734836fe5a4..1c2303d18bf4 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -19,6 +19,7 @@
19#include <linux/i2c.h> 19#include <linux/i2c.h>
20#include <linux/log2.h> 20#include <linux/log2.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/of.h>
22#include <linux/of_gpio.h> 23#include <linux/of_gpio.h>
23#include <linux/pm.h> 24#include <linux/pm.h>
24#include <linux/regulator/consumer.h> 25#include <linux/regulator/consumer.h>
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index 6fec9384d86e..e7f555cc827a 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1460,7 +1460,7 @@ static int s5c73m3_oif_registered(struct v4l2_subdev *sd)
1460 mutex_unlock(&state->lock); 1460 mutex_unlock(&state->lock);
1461 1461
1462 v4l2_dbg(1, s5c73m3_dbg, sd, "%s: Booting %s (%d)\n", 1462 v4l2_dbg(1, s5c73m3_dbg, sd, "%s: Booting %s (%d)\n",
1463 __func__, ret ? "failed" : "succeded", ret); 1463 __func__, ret ? "failed" : "succeeded", ret);
1464 1464
1465 return ret; 1465 return ret;
1466} 1466}
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h
index 9d2c08652246..9dfa516f6944 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3.h
+++ b/drivers/media/i2c/s5c73m3/s5c73m3.h
@@ -393,7 +393,7 @@ struct s5c73m3 {
393 393
394 /* External master clock frequency */ 394 /* External master clock frequency */
395 u32 mclk_frequency; 395 u32 mclk_frequency;
396 /* Video bus type - MIPI-CSI2/paralell */ 396 /* Video bus type - MIPI-CSI2/parallel */
397 enum v4l2_mbus_type bus_type; 397 enum v4l2_mbus_type bus_type;
398 398
399 const struct s5c73m3_frame_size *sensor_pix_size[2]; 399 const struct s5c73m3_frame_size *sensor_pix_size[2];
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index 637d02634527..afdbcb045cee 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -1699,7 +1699,7 @@ static void saa711x_write_platform_data(struct saa711x_state *state,
1699 * the analog demod. 1699 * the analog demod.
1700 * If the tuner is not found, it returns -ENODEV. 1700 * If the tuner is not found, it returns -ENODEV.
1701 * If auto-detection is disabled and the tuner doesn't match what it was 1701 * If auto-detection is disabled and the tuner doesn't match what it was
1702 * requred, it returns -EINVAL and fills 'name'. 1702 * required, it returns -EINVAL and fills 'name'.
1703 * If the chip is found, it returns the chip ID and fills 'name'. 1703 * If the chip is found, it returns the chip ID and fills 'name'.
1704 */ 1704 */
1705static int saa711x_detect_chip(struct i2c_client *client, 1705static int saa711x_detect_chip(struct i2c_client *client,
diff --git a/drivers/media/i2c/soc_camera/ov5642.c b/drivers/media/i2c/soc_camera/ov5642.c
index 0a5c5d4fedd6..d2daa6a8f272 100644
--- a/drivers/media/i2c/soc_camera/ov5642.c
+++ b/drivers/media/i2c/soc_camera/ov5642.c
@@ -642,7 +642,7 @@ static const struct ov5642_datafmt
642static int reg_read(struct i2c_client *client, u16 reg, u8 *val) 642static int reg_read(struct i2c_client *client, u16 reg, u8 *val)
643{ 643{
644 int ret; 644 int ret;
645 /* We have 16-bit i2c addresses - care for endianess */ 645 /* We have 16-bit i2c addresses - care for endianness */
646 unsigned char data[2] = { reg >> 8, reg & 0xff }; 646 unsigned char data[2] = { reg >> 8, reg & 0xff };
647 647
648 ret = i2c_master_send(client, data, 2); 648 ret = i2c_master_send(client, data, 2);
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index 42276d93624c..ed9ae8875348 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -83,7 +83,8 @@ static int ths7303_write(struct v4l2_subdev *sd, u8 reg, u8 val)
83} 83}
84 84
85/* following function is used to set ths7303 */ 85/* following function is used to set ths7303 */
86int ths7303_setval(struct v4l2_subdev *sd, enum ths7303_filter_mode mode) 86static int ths7303_setval(struct v4l2_subdev *sd,
87 enum ths7303_filter_mode mode)
87{ 88{
88 struct i2c_client *client = v4l2_get_subdevdata(sd); 89 struct i2c_client *client = v4l2_get_subdevdata(sd);
89 struct ths7303_state *state = to_state(sd); 90 struct ths7303_state *state = to_state(sd);
diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c
index 3f584a7d0781..bee7946faa7c 100644
--- a/drivers/media/i2c/wm8775.c
+++ b/drivers/media/i2c/wm8775.c
@@ -130,12 +130,10 @@ static int wm8775_s_routing(struct v4l2_subdev *sd,
130 return -EINVAL; 130 return -EINVAL;
131 } 131 }
132 state->input = input; 132 state->input = input;
133 if (!v4l2_ctrl_g_ctrl(state->mute)) 133 if (v4l2_ctrl_g_ctrl(state->mute))
134 return 0; 134 return 0;
135 if (!v4l2_ctrl_g_ctrl(state->vol)) 135 if (!v4l2_ctrl_g_ctrl(state->vol))
136 return 0; 136 return 0;
137 if (!v4l2_ctrl_g_ctrl(state->bal))
138 return 0;
139 wm8775_set_audio(sd, 1); 137 wm8775_set_audio(sd, 1);
140 return 0; 138 return 0;
141} 139}
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index a3b1ee9c00d7..92a06fd85865 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -4182,7 +4182,8 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
4182 } 4182 }
4183 btv->std = V4L2_STD_PAL; 4183 btv->std = V4L2_STD_PAL;
4184 init_irqreg(btv); 4184 init_irqreg(btv);
4185 v4l2_ctrl_handler_setup(hdl); 4185 if (!bttv_tvcards[btv->c.type].no_video)
4186 v4l2_ctrl_handler_setup(hdl);
4186 if (hdl->error) { 4187 if (hdl->error) {
4187 result = hdl->error; 4188 result = hdl->error;
4188 goto fail2; 4189 goto fail2;
diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h
index 2767c64df0c8..57f4688ea55b 100644
--- a/drivers/media/pci/cx18/cx18-driver.h
+++ b/drivers/media/pci/cx18/cx18-driver.h
@@ -262,7 +262,7 @@ struct cx18_options {
262}; 262};
263 263
264/* per-mdl bit flags */ 264/* per-mdl bit flags */
265#define CX18_F_M_NEED_SWAP 0 /* mdl buffer data must be endianess swapped */ 265#define CX18_F_M_NEED_SWAP 0 /* mdl buffer data must be endianness swapped */
266 266
267/* per-stream, s_flags */ 267/* per-stream, s_flags */
268#define CX18_F_S_CLAIMED 3 /* this stream is claimed */ 268#define CX18_F_S_CLAIMED 3 /* this stream is claimed */
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index e3fc2c71808a..95666eee7b27 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -427,7 +427,7 @@ int mc417_register_read(struct cx23885_dev *dev, u16 address, u32 *value)
427 cx_write(MC417_RWD, regval); 427 cx_write(MC417_RWD, regval);
428 428
429 /* Transition RD to effect read transaction across bus. 429 /* Transition RD to effect read transaction across bus.
430 * Transtion 0x5000 -> 0x9000 correct (RD/RDY -> WR/RDY)? 430 * Transition 0x5000 -> 0x9000 correct (RD/RDY -> WR/RDY)?
431 * Should it be 0x9000 -> 0xF000 (also why is RDY being set, its 431 * Should it be 0x9000 -> 0xF000 (also why is RDY being set, its
432 * input only...) 432 * input only...)
433 */ 433 */
diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c
index 8164d74b46a4..655d6854a8d7 100644
--- a/drivers/media/pci/pluto2/pluto2.c
+++ b/drivers/media/pci/pluto2/pluto2.c
@@ -401,7 +401,7 @@ static int pluto_hw_init(struct pluto *pluto)
401 /* set automatic LED control by FPGA */ 401 /* set automatic LED control by FPGA */
402 pluto_rw(pluto, REG_MISC, MISC_ALED, MISC_ALED); 402 pluto_rw(pluto, REG_MISC, MISC_ALED, MISC_ALED);
403 403
404 /* set data endianess */ 404 /* set data endianness */
405#ifdef __LITTLE_ENDIAN 405#ifdef __LITTLE_ENDIAN
406 pluto_rw(pluto, REG_PIDn(0), PID0_END, PID0_END); 406 pluto_rw(pluto, REG_PIDn(0), PID0_END, PID0_END);
407#else 407#else
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 57ef5456f1e8..1bf06970ca3e 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -1354,9 +1354,11 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
1354 if (fw_debug) { 1354 if (fw_debug) {
1355 dev->kthread = kthread_run(saa7164_thread_function, dev, 1355 dev->kthread = kthread_run(saa7164_thread_function, dev,
1356 "saa7164 debug"); 1356 "saa7164 debug");
1357 if (!dev->kthread) 1357 if (IS_ERR(dev->kthread)) {
1358 dev->kthread = NULL;
1358 printk(KERN_ERR "%s() Failed to create " 1359 printk(KERN_ERR "%s() Failed to create "
1359 "debug kernel thread\n", __func__); 1360 "debug kernel thread\n", __func__);
1361 }
1360 } 1362 }
1361 1363
1362 } /* != BOARD_UNKNOWN */ 1364 } /* != BOARD_UNKNOWN */
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index bd72fb97fea5..61f3dbcc259f 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -1434,7 +1434,7 @@ static void coda_buf_queue(struct vb2_buffer *vb)
1434 if (q_data->fourcc == V4L2_PIX_FMT_H264 && 1434 if (q_data->fourcc == V4L2_PIX_FMT_H264 &&
1435 vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { 1435 vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
1436 /* 1436 /*
1437 * For backwards compatiblity, queuing an empty buffer marks 1437 * For backwards compatibility, queuing an empty buffer marks
1438 * the stream end 1438 * the stream end
1439 */ 1439 */
1440 if (vb2_get_plane_payload(vb, 0) == 0) 1440 if (vb2_get_plane_payload(vb, 0) == 0)
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index 3d66d88ea3a1..f7915695c907 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -1039,7 +1039,7 @@ static int fimc_runtime_resume(struct device *dev)
1039 1039
1040 dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state); 1040 dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
1041 1041
1042 /* Enable clocks and perform basic initalization */ 1042 /* Enable clocks and perform basic initialization */
1043 clk_enable(fimc->clock[CLK_GATE]); 1043 clk_enable(fimc->clock[CLK_GATE]);
1044 fimc_hw_reset(fimc); 1044 fimc_hw_reset(fimc);
1045 1045
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 7a4ee4c0449d..c1bce170df6f 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -759,7 +759,7 @@ static int fimc_md_register_platform_entity(struct fimc_md *fmd,
759 goto dev_unlock; 759 goto dev_unlock;
760 760
761 drvdata = dev_get_drvdata(dev); 761 drvdata = dev_get_drvdata(dev);
762 /* Some subdev didn't probe succesfully id drvdata is NULL */ 762 /* Some subdev didn't probe successfully id drvdata is NULL */
763 if (drvdata) { 763 if (drvdata) {
764 switch (plat_entity) { 764 switch (plat_entity) {
765 case IDX_FIMC: 765 case IDX_FIMC:
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 36513e896413..65cab70fefcb 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -341,8 +341,7 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
341 ctx->xt->dir = DMA_MEM_TO_MEM; 341 ctx->xt->dir = DMA_MEM_TO_MEM;
342 ctx->xt->src_sgl = false; 342 ctx->xt->src_sgl = false;
343 ctx->xt->dst_sgl = true; 343 ctx->xt->dst_sgl = true;
344 flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | 344 flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
345 DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SKIP_SRC_UNMAP;
346 345
347 tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags); 346 tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags);
348 if (tx == NULL) { 347 if (tx == NULL) {
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
index 3458fa0e2fd5..054507f16734 100644
--- a/drivers/media/platform/marvell-ccic/mmp-driver.c
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -142,12 +142,6 @@ static int mmpcam_power_up(struct mcam_camera *mcam)
142 struct mmp_camera *cam = mcam_to_cam(mcam); 142 struct mmp_camera *cam = mcam_to_cam(mcam);
143 struct mmp_camera_platform_data *pdata; 143 struct mmp_camera_platform_data *pdata;
144 144
145 if (mcam->bus_type == V4L2_MBUS_CSI2) {
146 cam->mipi_clk = devm_clk_get(mcam->dev, "mipi");
147 if ((IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0))
148 return PTR_ERR(cam->mipi_clk);
149 }
150
151/* 145/*
152 * Turn on power and clocks to the controller. 146 * Turn on power and clocks to the controller.
153 */ 147 */
@@ -186,12 +180,6 @@ static void mmpcam_power_down(struct mcam_camera *mcam)
186 gpio_set_value(pdata->sensor_power_gpio, 0); 180 gpio_set_value(pdata->sensor_power_gpio, 0);
187 gpio_set_value(pdata->sensor_reset_gpio, 0); 181 gpio_set_value(pdata->sensor_reset_gpio, 0);
188 182
189 if (mcam->bus_type == V4L2_MBUS_CSI2 && !IS_ERR(cam->mipi_clk)) {
190 if (cam->mipi_clk)
191 devm_clk_put(mcam->dev, cam->mipi_clk);
192 cam->mipi_clk = NULL;
193 }
194
195 mcam_clk_disable(mcam); 183 mcam_clk_disable(mcam);
196} 184}
197 185
@@ -292,8 +280,9 @@ void mmpcam_calc_dphy(struct mcam_camera *mcam)
292 return; 280 return;
293 281
294 /* get the escape clk, this is hard coded */ 282 /* get the escape clk, this is hard coded */
283 clk_prepare_enable(cam->mipi_clk);
295 tx_clk_esc = (clk_get_rate(cam->mipi_clk) / 1000000) / 12; 284 tx_clk_esc = (clk_get_rate(cam->mipi_clk) / 1000000) / 12;
296 285 clk_disable_unprepare(cam->mipi_clk);
297 /* 286 /*
298 * dphy[2] - CSI2_DPHY6: 287 * dphy[2] - CSI2_DPHY6:
299 * bit 0 ~ bit 7: CK Term Enable 288 * bit 0 ~ bit 7: CK Term Enable
@@ -325,19 +314,6 @@ static irqreturn_t mmpcam_irq(int irq, void *data)
325 return IRQ_RETVAL(handled); 314 return IRQ_RETVAL(handled);
326} 315}
327 316
328static void mcam_deinit_clk(struct mcam_camera *mcam)
329{
330 unsigned int i;
331
332 for (i = 0; i < NR_MCAM_CLK; i++) {
333 if (!IS_ERR(mcam->clk[i])) {
334 if (mcam->clk[i])
335 devm_clk_put(mcam->dev, mcam->clk[i]);
336 }
337 mcam->clk[i] = NULL;
338 }
339}
340
341static void mcam_init_clk(struct mcam_camera *mcam) 317static void mcam_init_clk(struct mcam_camera *mcam)
342{ 318{
343 unsigned int i; 319 unsigned int i;
@@ -371,7 +347,6 @@ static int mmpcam_probe(struct platform_device *pdev)
371 if (cam == NULL) 347 if (cam == NULL)
372 return -ENOMEM; 348 return -ENOMEM;
373 cam->pdev = pdev; 349 cam->pdev = pdev;
374 cam->mipi_clk = NULL;
375 INIT_LIST_HEAD(&cam->devlist); 350 INIT_LIST_HEAD(&cam->devlist);
376 351
377 mcam = &cam->mcam; 352 mcam = &cam->mcam;
@@ -387,6 +362,11 @@ static int mmpcam_probe(struct platform_device *pdev)
387 mcam->mclk_div = pdata->mclk_div; 362 mcam->mclk_div = pdata->mclk_div;
388 mcam->bus_type = pdata->bus_type; 363 mcam->bus_type = pdata->bus_type;
389 mcam->dphy = pdata->dphy; 364 mcam->dphy = pdata->dphy;
365 if (mcam->bus_type == V4L2_MBUS_CSI2) {
366 cam->mipi_clk = devm_clk_get(mcam->dev, "mipi");
367 if ((IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0))
368 return PTR_ERR(cam->mipi_clk);
369 }
390 mcam->mipi_enabled = false; 370 mcam->mipi_enabled = false;
391 mcam->lane = pdata->lane; 371 mcam->lane = pdata->lane;
392 mcam->chip_id = MCAM_ARMADA610; 372 mcam->chip_id = MCAM_ARMADA610;
@@ -444,7 +424,7 @@ static int mmpcam_probe(struct platform_device *pdev)
444 */ 424 */
445 ret = mmpcam_power_up(mcam); 425 ret = mmpcam_power_up(mcam);
446 if (ret) 426 if (ret)
447 goto out_deinit_clk; 427 return ret;
448 ret = mccic_register(mcam); 428 ret = mccic_register(mcam);
449 if (ret) 429 if (ret)
450 goto out_power_down; 430 goto out_power_down;
@@ -469,8 +449,6 @@ out_unregister:
469 mccic_shutdown(mcam); 449 mccic_shutdown(mcam);
470out_power_down: 450out_power_down:
471 mmpcam_power_down(mcam); 451 mmpcam_power_down(mcam);
472out_deinit_clk:
473 mcam_deinit_clk(mcam);
474 return ret; 452 return ret;
475} 453}
476 454
@@ -478,18 +456,10 @@ out_deinit_clk:
478static int mmpcam_remove(struct mmp_camera *cam) 456static int mmpcam_remove(struct mmp_camera *cam)
479{ 457{
480 struct mcam_camera *mcam = &cam->mcam; 458 struct mcam_camera *mcam = &cam->mcam;
481 struct mmp_camera_platform_data *pdata;
482 459
483 mmpcam_remove_device(cam); 460 mmpcam_remove_device(cam);
484 mccic_shutdown(mcam); 461 mccic_shutdown(mcam);
485 mmpcam_power_down(mcam); 462 mmpcam_power_down(mcam);
486 pdata = cam->pdev->dev.platform_data;
487 gpio_free(pdata->sensor_reset_gpio);
488 gpio_free(pdata->sensor_power_gpio);
489 mcam_deinit_clk(mcam);
490 iounmap(cam->power_regs);
491 iounmap(mcam->regs);
492 kfree(cam);
493 return 0; 463 return 0;
494} 464}
495 465
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 1c3608039663..561bce8ffb1b 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -1673,7 +1673,7 @@ void omap3isp_print_status(struct isp_device *isp)
1673 * ISP clocks get disabled in suspend(). Similarly, the clocks are reenabled in 1673 * ISP clocks get disabled in suspend(). Similarly, the clocks are reenabled in
1674 * resume(), and the the pipelines are restarted in complete(). 1674 * resume(), and the the pipelines are restarted in complete().
1675 * 1675 *
1676 * TODO: PM dependencies between the ISP and sensors are not modeled explicitly 1676 * TODO: PM dependencies between the ISP and sensors are not modelled explicitly
1677 * yet. 1677 * yet.
1678 */ 1678 */
1679static int isp_pm_prepare(struct device *dev) 1679static int isp_pm_prepare(struct device *dev)
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index a908d006f527..f6304bb074f5 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -339,14 +339,11 @@ __isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
339 if (subdev == NULL) 339 if (subdev == NULL)
340 return -EINVAL; 340 return -EINVAL;
341 341
342 mutex_lock(&video->mutex);
343
344 fmt.pad = pad; 342 fmt.pad = pad;
345 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; 343 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
346 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
347 if (ret == -ENOIOCTLCMD)
348 ret = -EINVAL;
349 344
345 mutex_lock(&video->mutex);
346 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
350 mutex_unlock(&video->mutex); 347 mutex_unlock(&video->mutex);
351 348
352 if (ret) 349 if (ret)
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc.h b/drivers/media/platform/s5p-mfc/regs-mfc.h
index 9319e93599ae..6ccc3f8c122a 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc.h
@@ -382,7 +382,7 @@
382#define S5P_FIMV_R2H_CMD_EDFU_INIT_RET 16 382#define S5P_FIMV_R2H_CMD_EDFU_INIT_RET 16
383#define S5P_FIMV_R2H_CMD_ERR_RET 32 383#define S5P_FIMV_R2H_CMD_ERR_RET 32
384 384
385/* Dummy definition for MFCv6 compatibilty */ 385/* Dummy definition for MFCv6 compatibility */
386#define S5P_FIMV_CODEC_H264_MVC_DEC -1 386#define S5P_FIMV_CODEC_H264_MVC_DEC -1
387#define S5P_FIMV_R2H_CMD_FIELD_DONE_RET -1 387#define S5P_FIMV_R2H_CMD_FIELD_DONE_RET -1
388#define S5P_FIMV_MFC_RESET -1 388#define S5P_FIMV_MFC_RESET -1
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 5f2c4ad6c2cb..e46067a57853 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -239,7 +239,7 @@ static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
239 frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev); 239 frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
240 240
241 /* Copy timestamp / timecode from decoded src to dst and set 241 /* Copy timestamp / timecode from decoded src to dst and set
242 appropraite flags */ 242 appropriate flags */
243 src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); 243 src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
244 list_for_each_entry(dst_buf, &ctx->dst_queue, list) { 244 list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
245 if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) { 245 if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) {
@@ -428,7 +428,7 @@ static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
428 case MFCINST_FINISHING: 428 case MFCINST_FINISHING:
429 case MFCINST_FINISHED: 429 case MFCINST_FINISHED:
430 case MFCINST_RUNNING: 430 case MFCINST_RUNNING:
431 /* It is higly probable that an error occured 431 /* It is highly probable that an error occurred
432 * while decoding a frame */ 432 * while decoding a frame */
433 clear_work_bit(ctx); 433 clear_work_bit(ctx);
434 ctx->state = MFCINST_ERROR; 434 ctx->state = MFCINST_ERROR;
@@ -611,7 +611,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
611 mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err); 611 mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err);
612 switch (reason) { 612 switch (reason) {
613 case S5P_MFC_R2H_CMD_ERR_RET: 613 case S5P_MFC_R2H_CMD_ERR_RET:
614 /* An error has occured */ 614 /* An error has occurred */
615 if (ctx->state == MFCINST_RUNNING && 615 if (ctx->state == MFCINST_RUNNING &&
616 s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >= 616 s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >=
617 dev->warn_start) 617 dev->warn_start)
@@ -840,7 +840,7 @@ static int s5p_mfc_open(struct file *file)
840 mutex_unlock(&dev->mfc_mutex); 840 mutex_unlock(&dev->mfc_mutex);
841 mfc_debug_leave(); 841 mfc_debug_leave();
842 return ret; 842 return ret;
843 /* Deinit when failure occured */ 843 /* Deinit when failure occurred */
844err_queue_init: 844err_queue_init:
845 if (dev->num_inst == 1) 845 if (dev->num_inst == 1)
846 s5p_mfc_deinit_hw(dev); 846 s5p_mfc_deinit_hw(dev);
@@ -881,14 +881,14 @@ static int s5p_mfc_release(struct file *file)
881 /* Mark context as idle */ 881 /* Mark context as idle */
882 clear_work_bit_irqsave(ctx); 882 clear_work_bit_irqsave(ctx);
883 /* If instance was initialised then 883 /* If instance was initialised then
884 * return instance and free reosurces */ 884 * return instance and free resources */
885 if (ctx->inst_no != MFC_NO_INSTANCE_SET) { 885 if (ctx->inst_no != MFC_NO_INSTANCE_SET) {
886 mfc_debug(2, "Has to free instance\n"); 886 mfc_debug(2, "Has to free instance\n");
887 ctx->state = MFCINST_RETURN_INST; 887 ctx->state = MFCINST_RETURN_INST;
888 set_work_bit_irqsave(ctx); 888 set_work_bit_irqsave(ctx);
889 s5p_mfc_clean_ctx_int_flags(ctx); 889 s5p_mfc_clean_ctx_int_flags(ctx);
890 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); 890 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
891 /* Wait until instance is returned or timeout occured */ 891 /* Wait until instance is returned or timeout occurred */
892 if (s5p_mfc_wait_for_done_ctx 892 if (s5p_mfc_wait_for_done_ctx
893 (ctx, S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)) { 893 (ctx, S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)) {
894 s5p_mfc_clock_off(); 894 s5p_mfc_clock_off();
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
index 7cab6849fb5b..2475a3c9a0a6 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -69,7 +69,7 @@ int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev)
69 69
70 } else { 70 } else {
71 /* In this case bank2 can point to the same address as bank1. 71 /* In this case bank2 can point to the same address as bank1.
72 * Firmware will always occupy the beggining of this area so it is 72 * Firmware will always occupy the beginning of this area so it is
73 * impossible having a video frame buffer with zero address. */ 73 * impossible having a video frame buffer with zero address. */
74 dev->bank2 = dev->bank1; 74 dev->bank2 = dev->bank1;
75 } 75 }
diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
index 04e6490a45be..fb2acc53112a 100644
--- a/drivers/media/platform/s5p-tv/mixer.h
+++ b/drivers/media/platform/s5p-tv/mixer.h
@@ -65,7 +65,7 @@ struct mxr_format {
65 int num_subframes; 65 int num_subframes;
66 /** specifies to which subframe belong given plane */ 66 /** specifies to which subframe belong given plane */
67 int plane2subframe[MXR_MAX_PLANES]; 67 int plane2subframe[MXR_MAX_PLANES];
68 /** internal code, driver dependant */ 68 /** internal code, driver dependent */
69 unsigned long cookie; 69 unsigned long cookie;
70}; 70};
71 71
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index 641b1f071e06..81b97db111d8 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -528,7 +528,7 @@ static int mxr_s_dv_timings(struct file *file, void *fh,
528 mutex_lock(&mdev->mutex); 528 mutex_lock(&mdev->mutex);
529 529
530 /* timings change cannot be done while there is an entity 530 /* timings change cannot be done while there is an entity
531 * dependant on output configuration 531 * dependent on output configuration
532 */ 532 */
533 if (mdev->n_output > 0) { 533 if (mdev->n_output > 0) {
534 mutex_unlock(&mdev->mutex); 534 mutex_unlock(&mdev->mutex);
@@ -585,7 +585,7 @@ static int mxr_s_std(struct file *file, void *fh, v4l2_std_id norm)
585 mutex_lock(&mdev->mutex); 585 mutex_lock(&mdev->mutex);
586 586
587 /* standard change cannot be done while there is an entity 587 /* standard change cannot be done while there is an entity
588 * dependant on output configuration 588 * dependent on output configuration
589 */ 589 */
590 if (mdev->n_output > 0) { 590 if (mdev->n_output > 0) {
591 mutex_unlock(&mdev->mutex); 591 mutex_unlock(&mdev->mutex);
diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c
index 6769193c7c7b..74ce8b6b79fa 100644
--- a/drivers/media/platform/soc_camera/omap1_camera.c
+++ b/drivers/media/platform/soc_camera/omap1_camera.c
@@ -1495,7 +1495,7 @@ static int omap1_cam_set_bus_param(struct soc_camera_device *icd)
1495 if (ctrlclock & LCLK_EN) 1495 if (ctrlclock & LCLK_EN)
1496 CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock); 1496 CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
1497 1497
1498 /* select bus endianess */ 1498 /* select bus endianness */
1499 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); 1499 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
1500 fmt = xlate->host_fmt; 1500 fmt = xlate->host_fmt;
1501 1501
diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c
index 6a74ce040d28..ccdadd623a3a 100644
--- a/drivers/media/platform/timblogiw.c
+++ b/drivers/media/platform/timblogiw.c
@@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
565 565
566 desc = dmaengine_prep_slave_sg(fh->chan, 566 desc = dmaengine_prep_slave_sg(fh->chan,
567 buf->sg, sg_elems, DMA_DEV_TO_MEM, 567 buf->sg, sg_elems, DMA_DEV_TO_MEM,
568 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); 568 DMA_PREP_INTERRUPT);
569 if (!desc) { 569 if (!desc) {
570 spin_lock_irq(&fh->queue_lock); 570 spin_lock_irq(&fh->queue_lock);
571 list_del_init(&vb->queue); 571 list_del_init(&vb->queue);
diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
index 1d3f11965196..2d4e73b45c5e 100644
--- a/drivers/media/platform/vivi.c
+++ b/drivers/media/platform/vivi.c
@@ -1108,7 +1108,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
1108 return 0; 1108 return 0;
1109} 1109}
1110 1110
1111/* timeperframe is arbitrary and continous */ 1111/* timeperframe is arbitrary and continuous */
1112static int vidioc_enum_frameintervals(struct file *file, void *priv, 1112static int vidioc_enum_frameintervals(struct file *file, void *priv,
1113 struct v4l2_frmivalenum *fival) 1113 struct v4l2_frmivalenum *fival)
1114{ 1114{
@@ -1125,7 +1125,7 @@ static int vidioc_enum_frameintervals(struct file *file, void *priv,
1125 1125
1126 fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS; 1126 fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
1127 1127
1128 /* fill in stepwise (step=1.0 is requred by V4L2 spec) */ 1128 /* fill in stepwise (step=1.0 is required by V4L2 spec) */
1129 fival->stepwise.min = tpf_min; 1129 fival->stepwise.min = tpf_min;
1130 fival->stepwise.max = tpf_max; 1130 fival->stepwise.max = tpf_max;
1131 fival->stepwise.step = (struct v4l2_fract) {1, 1}; 1131 fival->stepwise.step = (struct v4l2_fract) {1, 1};
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 1c9e771aa15c..d16bf0f41e24 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -323,7 +323,7 @@ static void vsp1_clocks_disable(struct vsp1_device *vsp1)
323 * Increment the VSP1 reference count and initialize the device if the first 323 * Increment the VSP1 reference count and initialize the device if the first
324 * reference is taken. 324 * reference is taken.
325 * 325 *
326 * Return a pointer to the VSP1 device or NULL if an error occured. 326 * Return a pointer to the VSP1 device or NULL if an error occurred.
327 */ 327 */
328struct vsp1_device *vsp1_device_get(struct vsp1_device *vsp1) 328struct vsp1_device *vsp1_device_get(struct vsp1_device *vsp1)
329{ 329{
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 714c53ef6c11..4b0ac07af662 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -1026,8 +1026,10 @@ int vsp1_video_init(struct vsp1_video *video, struct vsp1_entity *rwpf)
1026 1026
1027 /* ... and the buffers queue... */ 1027 /* ... and the buffers queue... */
1028 video->alloc_ctx = vb2_dma_contig_init_ctx(video->vsp1->dev); 1028 video->alloc_ctx = vb2_dma_contig_init_ctx(video->vsp1->dev);
1029 if (IS_ERR(video->alloc_ctx)) 1029 if (IS_ERR(video->alloc_ctx)) {
1030 ret = PTR_ERR(video->alloc_ctx);
1030 goto error; 1031 goto error;
1032 }
1031 1033
1032 video->queue.type = video->type; 1034 video->queue.type = video->type;
1033 video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; 1035 video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
index 3db8a8cfe1a8..050b3bb96fec 100644
--- a/drivers/media/radio/radio-shark.c
+++ b/drivers/media/radio/radio-shark.c
@@ -271,8 +271,7 @@ static void shark_unregister_leds(struct shark_device *shark)
271 cancel_work_sync(&shark->led_work); 271 cancel_work_sync(&shark->led_work);
272} 272}
273 273
274#ifdef CONFIG_PM 274static inline void shark_resume_leds(struct shark_device *shark)
275static void shark_resume_leds(struct shark_device *shark)
276{ 275{
277 if (test_bit(BLUE_IS_PULSE, &shark->brightness_new)) 276 if (test_bit(BLUE_IS_PULSE, &shark->brightness_new))
278 set_bit(BLUE_PULSE_LED, &shark->brightness_new); 277 set_bit(BLUE_PULSE_LED, &shark->brightness_new);
@@ -281,7 +280,6 @@ static void shark_resume_leds(struct shark_device *shark)
281 set_bit(RED_LED, &shark->brightness_new); 280 set_bit(RED_LED, &shark->brightness_new);
282 schedule_work(&shark->led_work); 281 schedule_work(&shark->led_work);
283} 282}
284#endif
285#else 283#else
286static int shark_register_leds(struct shark_device *shark, struct device *dev) 284static int shark_register_leds(struct shark_device *shark, struct device *dev)
287{ 285{
diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
index d86d90dab8bf..8654e0dc5c95 100644
--- a/drivers/media/radio/radio-shark2.c
+++ b/drivers/media/radio/radio-shark2.c
@@ -237,8 +237,7 @@ static void shark_unregister_leds(struct shark_device *shark)
237 cancel_work_sync(&shark->led_work); 237 cancel_work_sync(&shark->led_work);
238} 238}
239 239
240#ifdef CONFIG_PM 240static inline void shark_resume_leds(struct shark_device *shark)
241static void shark_resume_leds(struct shark_device *shark)
242{ 241{
243 int i; 242 int i;
244 243
@@ -247,7 +246,6 @@ static void shark_resume_leds(struct shark_device *shark)
247 246
248 schedule_work(&shark->led_work); 247 schedule_work(&shark->led_work);
249} 248}
250#endif
251#else 249#else
252static int shark_register_leds(struct shark_device *shark, struct device *dev) 250static int shark_register_leds(struct shark_device *shark, struct device *dev)
253{ 251{
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
index 9c9084cb99f7..2fd9009f8663 100644
--- a/drivers/media/radio/radio-si476x.c
+++ b/drivers/media/radio/radio-si476x.c
@@ -268,8 +268,8 @@ struct si476x_radio;
268 * 268 *
269 * @tune_freq: Tune chip to a specific frequency 269 * @tune_freq: Tune chip to a specific frequency
270 * @seek_start: Star station seeking 270 * @seek_start: Star station seeking
271 * @rsq_status: Get Recieved Signal Quality(RSQ) status 271 * @rsq_status: Get Received Signal Quality(RSQ) status
272 * @rds_blckcnt: Get recived RDS blocks count 272 * @rds_blckcnt: Get received RDS blocks count
273 * @phase_diversity: Change phase diversity mode of the tuner 273 * @phase_diversity: Change phase diversity mode of the tuner
274 * @phase_div_status: Get phase diversity mode status 274 * @phase_div_status: Get phase diversity mode status
275 * @acf_status: Get the status of Automatically Controlled 275 * @acf_status: Get the status of Automatically Controlled
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index 036e2f54f4db..3ed1f5669f79 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -356,7 +356,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
356 So we keep it as-is. */ 356 So we keep it as-is. */
357 return -EINVAL; 357 return -EINVAL;
358 } 358 }
359 clamp(freq, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL); 359 freq = clamp(freq, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL);
360 tea5764_power_up(radio); 360 tea5764_power_up(radio);
361 tea5764_tune(radio, (freq * 125) / 2); 361 tea5764_tune(radio, (freq * 125) / 2);
362 return 0; 362 return 0;
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index 69e3245a58a0..a9319a24c7ef 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -112,7 +112,7 @@ static int tef6862_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequen
112 if (f->tuner != 0) 112 if (f->tuner != 0)
113 return -EINVAL; 113 return -EINVAL;
114 114
115 clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ); 115 freq = clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ);
116 pll = 1964 + ((freq - TEF6862_LO_FREQ) * 20) / FREQ_MUL; 116 pll = 1964 + ((freq - TEF6862_LO_FREQ) * 20) / FREQ_MUL;
117 i2cmsg[0] = (MSA_MODE_PRESET << MSA_MODE_SHIFT) | WM_SUB_PLLM; 117 i2cmsg[0] = (MSA_MODE_PRESET << MSA_MODE_SHIFT) | WM_SUB_PLLM;
118 i2cmsg[1] = (pll >> 8) & 0xff; 118 i2cmsg[1] = (pll >> 8) & 0xff;
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 72e3fa652481..f329485c6629 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1370,7 +1370,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
1370 * 0x68nnnnB7 to 0x6AnnnnB7, the left mouse button generates 1370 * 0x68nnnnB7 to 0x6AnnnnB7, the left mouse button generates
1371 * 0x688301b7 and the right one 0x688481b7. All other keys generate 1371 * 0x688301b7 and the right one 0x688481b7. All other keys generate
1372 * 0x2nnnnnnn. Position coordinate is encoded in buf[1] and buf[2] with 1372 * 0x2nnnnnnn. Position coordinate is encoded in buf[1] and buf[2] with
1373 * reversed endianess. Extract direction from buffer, rotate endianess, 1373 * reversed endianness. Extract direction from buffer, rotate endianness,
1374 * adjust sign and feed the values into stabilize(). The resulting codes 1374 * adjust sign and feed the values into stabilize(). The resulting codes
1375 * will be 0x01008000, 0x01007F00, which match the newer devices. 1375 * will be 0x01008000, 0x01007F00, which match the newer devices.
1376 */ 1376 */
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 094484fac94c..a5d4f883d053 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -118,7 +118,7 @@ static int debug;
118#define RR3_IR_IO_LENGTH_FUZZ 0x04 118#define RR3_IR_IO_LENGTH_FUZZ 0x04
119/* Timeout for end of signal detection */ 119/* Timeout for end of signal detection */
120#define RR3_IR_IO_SIG_TIMEOUT 0x05 120#define RR3_IR_IO_SIG_TIMEOUT 0x05
121/* Minumum value for pause recognition. */ 121/* Minimum value for pause recognition. */
122#define RR3_IR_IO_MIN_PAUSE 0x06 122#define RR3_IR_IO_MIN_PAUSE 0x06
123 123
124/* Clock freq. of EZ-USB chip */ 124/* Clock freq. of EZ-USB chip */
diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c
index 2e1a02e360ff..20cca405bf45 100644
--- a/drivers/media/tuners/mt2063.c
+++ b/drivers/media/tuners/mt2063.c
@@ -1195,7 +1195,7 @@ static u32 mt2063_set_dnc_output_enable(struct mt2063_state *state,
1195 * DNC Output is selected, the other is always off) 1195 * DNC Output is selected, the other is always off)
1196 * 1196 *
1197 * @state: ptr to mt2063_state structure 1197 * @state: ptr to mt2063_state structure
1198 * @Mode: desired reciever delivery system 1198 * @Mode: desired receiver delivery system
1199 * 1199 *
1200 * Note: Register cache must be valid for it to work 1200 * Note: Register cache must be valid for it to work
1201 */ 1201 */
@@ -2119,7 +2119,7 @@ static int mt2063_set_analog_params(struct dvb_frontend *fe,
2119 2119
2120/* 2120/*
2121 * As defined on EN 300 429, the DVB-C roll-off factor is 0.15. 2121 * As defined on EN 300 429, the DVB-C roll-off factor is 0.15.
2122 * So, the amount of the needed bandwith is given by: 2122 * So, the amount of the needed bandwidth is given by:
2123 * Bw = Symbol_rate * (1 + 0.15) 2123 * Bw = Symbol_rate * (1 + 0.15)
2124 * As such, the maximum symbol rate supported by 6 MHz is given by: 2124 * As such, the maximum symbol rate supported by 6 MHz is given by:
2125 * max_symbol_rate = 6 MHz / 1.15 = 5217391 Bauds 2125 * max_symbol_rate = 6 MHz / 1.15 = 5217391 Bauds
diff --git a/drivers/media/tuners/tuner-xc2028-types.h b/drivers/media/tuners/tuner-xc2028-types.h
index 74dc46a71f64..7e4798783db7 100644
--- a/drivers/media/tuners/tuner-xc2028-types.h
+++ b/drivers/media/tuners/tuner-xc2028-types.h
@@ -119,7 +119,7 @@
119#define V4L2_STD_A2 (V4L2_STD_A2_A | V4L2_STD_A2_B) 119#define V4L2_STD_A2 (V4L2_STD_A2_A | V4L2_STD_A2_B)
120#define V4L2_STD_NICAM (V4L2_STD_NICAM_A | V4L2_STD_NICAM_B) 120#define V4L2_STD_NICAM (V4L2_STD_NICAM_A | V4L2_STD_NICAM_B)
121 121
122/* To preserve backward compatibilty, 122/* To preserve backward compatibility,
123 (std & V4L2_STD_AUDIO) = 0 means that ALL audio stds are supported 123 (std & V4L2_STD_AUDIO) = 0 means that ALL audio stds are supported
124 */ 124 */
125 125
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index e9d017bea377..528cce958a82 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1412,8 +1412,8 @@ err_v4l2:
1412 usb_set_intfdata(interface, NULL); 1412 usb_set_intfdata(interface, NULL);
1413err_if: 1413err_if:
1414 usb_put_dev(udev); 1414 usb_put_dev(udev);
1415 kfree(dev);
1416 clear_bit(dev->devno, &cx231xx_devused); 1415 clear_bit(dev->devno, &cx231xx_devused);
1416 kfree(dev);
1417 return retval; 1417 return retval;
1418} 1418}
1419 1419
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index c8fcd78425bd..8f9b2cea88f0 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -131,7 +131,7 @@ static int af9035_wr_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len)
131{ 131{
132 u8 wbuf[MAX_XFER_SIZE]; 132 u8 wbuf[MAX_XFER_SIZE];
133 u8 mbox = (reg >> 16) & 0xff; 133 u8 mbox = (reg >> 16) & 0xff;
134 struct usb_req req = { CMD_MEM_WR, mbox, sizeof(wbuf), wbuf, 0, NULL }; 134 struct usb_req req = { CMD_MEM_WR, mbox, 6 + len, wbuf, 0, NULL };
135 135
136 if (6 + len > sizeof(wbuf)) { 136 if (6 + len > sizeof(wbuf)) {
137 dev_warn(&d->udev->dev, "%s: i2c wr: len=%d is too big!\n", 137 dev_warn(&d->udev->dev, "%s: i2c wr: len=%d is too big!\n",
@@ -238,14 +238,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
238 } else { 238 } else {
239 /* I2C */ 239 /* I2C */
240 u8 buf[MAX_XFER_SIZE]; 240 u8 buf[MAX_XFER_SIZE];
241 struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf), 241 struct usb_req req = { CMD_I2C_RD, 0, 5 + msg[0].len,
242 buf, msg[1].len, msg[1].buf }; 242 buf, msg[1].len, msg[1].buf };
243 243
244 if (5 + msg[0].len > sizeof(buf)) { 244 if (5 + msg[0].len > sizeof(buf)) {
245 dev_warn(&d->udev->dev, 245 dev_warn(&d->udev->dev,
246 "%s: i2c xfer: len=%d is too big!\n", 246 "%s: i2c xfer: len=%d is too big!\n",
247 KBUILD_MODNAME, msg[0].len); 247 KBUILD_MODNAME, msg[0].len);
248 return -EOPNOTSUPP; 248 ret = -EOPNOTSUPP;
249 goto unlock;
249 } 250 }
250 req.mbox |= ((msg[0].addr & 0x80) >> 3); 251 req.mbox |= ((msg[0].addr & 0x80) >> 3);
251 buf[0] = msg[1].len; 252 buf[0] = msg[1].len;
@@ -274,14 +275,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
274 } else { 275 } else {
275 /* I2C */ 276 /* I2C */
276 u8 buf[MAX_XFER_SIZE]; 277 u8 buf[MAX_XFER_SIZE];
277 struct usb_req req = { CMD_I2C_WR, 0, sizeof(buf), buf, 278 struct usb_req req = { CMD_I2C_WR, 0, 5 + msg[0].len,
278 0, NULL }; 279 buf, 0, NULL };
279 280
280 if (5 + msg[0].len > sizeof(buf)) { 281 if (5 + msg[0].len > sizeof(buf)) {
281 dev_warn(&d->udev->dev, 282 dev_warn(&d->udev->dev,
282 "%s: i2c xfer: len=%d is too big!\n", 283 "%s: i2c xfer: len=%d is too big!\n",
283 KBUILD_MODNAME, msg[0].len); 284 KBUILD_MODNAME, msg[0].len);
284 return -EOPNOTSUPP; 285 ret = -EOPNOTSUPP;
286 goto unlock;
285 } 287 }
286 req.mbox |= ((msg[0].addr & 0x80) >> 3); 288 req.mbox |= ((msg[0].addr & 0x80) >> 3);
287 buf[0] = msg[0].len; 289 buf[0] = msg[0].len;
@@ -319,6 +321,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
319 ret = -EOPNOTSUPP; 321 ret = -EOPNOTSUPP;
320 } 322 }
321 323
324unlock:
322 mutex_unlock(&d->i2c_mutex); 325 mutex_unlock(&d->i2c_mutex);
323 326
324 if (ret < 0) 327 if (ret < 0)
@@ -1534,6 +1537,8 @@ static const struct usb_device_id af9035_id_table[] = {
1534 /* XXX: that same ID [0ccd:0099] is used by af9015 driver too */ 1537 /* XXX: that same ID [0ccd:0099] is used by af9015 driver too */
1535 { DVB_USB_DEVICE(USB_VID_TERRATEC, 0x0099, 1538 { DVB_USB_DEVICE(USB_VID_TERRATEC, 0x0099,
1536 &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) }, 1539 &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) },
1540 { DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05,
1541 &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
1537 { } 1542 { }
1538}; 1543};
1539MODULE_DEVICE_TABLE(usb, af9035_id_table); 1544MODULE_DEVICE_TABLE(usb, af9035_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 2627553f7de1..08240e498451 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -266,7 +266,7 @@ static int mxl111sf_adap_fe_init(struct dvb_frontend *fe)
266 struct mxl111sf_adap_state *adap_state = &state->adap_state[fe->id]; 266 struct mxl111sf_adap_state *adap_state = &state->adap_state[fe->id];
267 int err; 267 int err;
268 268
269 /* exit if we didnt initialize the driver yet */ 269 /* exit if we didn't initialize the driver yet */
270 if (!state->chip_id) { 270 if (!state->chip_id) {
271 mxl_debug("driver not yet initialized, exit."); 271 mxl_debug("driver not yet initialized, exit.");
272 goto fail; 272 goto fail;
@@ -322,7 +322,7 @@ static int mxl111sf_adap_fe_sleep(struct dvb_frontend *fe)
322 struct mxl111sf_adap_state *adap_state = &state->adap_state[fe->id]; 322 struct mxl111sf_adap_state *adap_state = &state->adap_state[fe->id];
323 int err; 323 int err;
324 324
325 /* exit if we didnt initialize the driver yet */ 325 /* exit if we didn't initialize the driver yet */
326 if (!state->chip_id) { 326 if (!state->chip_id) {
327 mxl_debug("driver not yet initialized, exit."); 327 mxl_debug("driver not yet initialized, exit.");
328 goto fail; 328 goto fail;
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 40832a1aef6c..98d24aefb640 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -102,7 +102,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
102 if (rxlen > 62) { 102 if (rxlen > 62) {
103 err("i2c RX buffer can't exceed 62 bytes (dev 0x%02x)", 103 err("i2c RX buffer can't exceed 62 bytes (dev 0x%02x)",
104 device_addr); 104 device_addr);
105 txlen = 62; 105 rxlen = 62;
106 } 106 }
107 107
108 b[0] = I2C_SPEED_100KHZ_BIT; 108 b[0] = I2C_SPEED_100KHZ_BIT;
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index fc5d60efd4ab..dd19c9ff76e0 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -1664,8 +1664,8 @@ static int em28xx_v4l2_close(struct file *filp)
1664 1664
1665 em28xx_videodbg("users=%d\n", dev->users); 1665 em28xx_videodbg("users=%d\n", dev->users);
1666 1666
1667 mutex_lock(&dev->lock);
1668 vb2_fop_release(filp); 1667 vb2_fop_release(filp);
1668 mutex_lock(&dev->lock);
1669 1669
1670 if (dev->users == 1) { 1670 if (dev->users == 1) {
1671 /* the device is already disconnect, 1671 /* the device is already disconnect,
diff --git a/drivers/media/usb/gspca/gl860/gl860.c b/drivers/media/usb/gspca/gl860/gl860.c
index cb1e64ca59c9..cea8d7f51c3c 100644
--- a/drivers/media/usb/gspca/gl860/gl860.c
+++ b/drivers/media/usb/gspca/gl860/gl860.c
@@ -438,7 +438,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
438 s32 nToSkip = 438 s32 nToSkip =
439 sd->swapRB * (gspca_dev->cam.cam_mode[mode].bytesperline + 1); 439 sd->swapRB * (gspca_dev->cam.cam_mode[mode].bytesperline + 1);
440 440
441 /* Test only against 0202h, so endianess does not matter */ 441 /* Test only against 0202h, so endianness does not matter */
442 switch (*(s16 *) data) { 442 switch (*(s16 *) data) {
443 case 0x0202: /* End of frame, start a new one */ 443 case 0x0202: /* End of frame, start a new one */
444 gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); 444 gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
diff --git a/drivers/media/usb/gspca/pac207.c b/drivers/media/usb/gspca/pac207.c
index cd79c180f67b..07529e5a0c56 100644
--- a/drivers/media/usb/gspca/pac207.c
+++ b/drivers/media/usb/gspca/pac207.c
@@ -416,7 +416,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
416#if IS_ENABLED(CONFIG_INPUT) 416#if IS_ENABLED(CONFIG_INPUT)
417static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, 417static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
418 u8 *data, /* interrupt packet data */ 418 u8 *data, /* interrupt packet data */
419 int len) /* interrput packet length */ 419 int len) /* interrupt packet length */
420{ 420{
421 int ret = -EINVAL; 421 int ret = -EINVAL;
422 422
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index a91509643563..2fd1c5e31a0f 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -874,7 +874,7 @@ static int sd_dbg_s_register(struct gspca_dev *gspca_dev,
874#if IS_ENABLED(CONFIG_INPUT) 874#if IS_ENABLED(CONFIG_INPUT)
875static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, 875static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
876 u8 *data, /* interrupt packet data */ 876 u8 *data, /* interrupt packet data */
877 int len) /* interrput packet length */ 877 int len) /* interrupt packet length */
878{ 878{
879 int ret = -EINVAL; 879 int ret = -EINVAL;
880 u8 data0, data1; 880 u8 data0, data1;
diff --git a/drivers/media/usb/gspca/stk1135.c b/drivers/media/usb/gspca/stk1135.c
index 1fc80af2a189..48234c9a8b6c 100644
--- a/drivers/media/usb/gspca/stk1135.c
+++ b/drivers/media/usb/gspca/stk1135.c
@@ -361,6 +361,9 @@ static void stk1135_configure_clock(struct gspca_dev *gspca_dev)
361 361
362 /* set serial interface clock divider (30MHz/0x1f*16+2) = 60240 kHz) */ 362 /* set serial interface clock divider (30MHz/0x1f*16+2) = 60240 kHz) */
363 reg_w(gspca_dev, STK1135_REG_SICTL + 2, 0x1f); 363 reg_w(gspca_dev, STK1135_REG_SICTL + 2, 0x1f);
364
365 /* wait a while for sensor to catch up */
366 udelay(1000);
364} 367}
365 368
366static void stk1135_camera_disable(struct gspca_dev *gspca_dev) 369static void stk1135_camera_disable(struct gspca_dev *gspca_dev)
diff --git a/drivers/media/usb/gspca/stv0680.c b/drivers/media/usb/gspca/stv0680.c
index 9c0827631b9c..7f94ec74282e 100644
--- a/drivers/media/usb/gspca/stv0680.c
+++ b/drivers/media/usb/gspca/stv0680.c
@@ -139,7 +139,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
139 struct sd *sd = (struct sd *) gspca_dev; 139 struct sd *sd = (struct sd *) gspca_dev;
140 struct cam *cam = &gspca_dev->cam; 140 struct cam *cam = &gspca_dev->cam;
141 141
142 /* Give the camera some time to settle, otherwise initalization will 142 /* Give the camera some time to settle, otherwise initialization will
143 fail on hotplug, and yes it really needs a full second. */ 143 fail on hotplug, and yes it really needs a full second. */
144 msleep(1000); 144 msleep(1000);
145 145
diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
index a517d185febe..46c9f2229a18 100644
--- a/drivers/media/usb/gspca/sunplus.c
+++ b/drivers/media/usb/gspca/sunplus.c
@@ -1027,6 +1027,7 @@ static const struct usb_device_id device_table[] = {
1027 {USB_DEVICE(0x055f, 0xc650), BS(SPCA533, 0)}, 1027 {USB_DEVICE(0x055f, 0xc650), BS(SPCA533, 0)},
1028 {USB_DEVICE(0x05da, 0x1018), BS(SPCA504B, 0)}, 1028 {USB_DEVICE(0x05da, 0x1018), BS(SPCA504B, 0)},
1029 {USB_DEVICE(0x06d6, 0x0031), BS(SPCA533, 0)}, 1029 {USB_DEVICE(0x06d6, 0x0031), BS(SPCA533, 0)},
1030 {USB_DEVICE(0x06d6, 0x0041), BS(SPCA504B, 0)},
1030 {USB_DEVICE(0x0733, 0x1311), BS(SPCA533, 0)}, 1031 {USB_DEVICE(0x0733, 0x1311), BS(SPCA533, 0)},
1031 {USB_DEVICE(0x0733, 0x1314), BS(SPCA533, 0)}, 1032 {USB_DEVICE(0x0733, 0x1314), BS(SPCA533, 0)},
1032 {USB_DEVICE(0x0733, 0x2211), BS(SPCA533, 0)}, 1033 {USB_DEVICE(0x0733, 0x2211), BS(SPCA533, 0)},
diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c
index 7b95d8e88a20..d3e1b6d8bf49 100644
--- a/drivers/media/usb/gspca/zc3xx.c
+++ b/drivers/media/usb/gspca/zc3xx.c
@@ -6905,7 +6905,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
6905#if IS_ENABLED(CONFIG_INPUT) 6905#if IS_ENABLED(CONFIG_INPUT)
6906static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, 6906static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
6907 u8 *data, /* interrupt packet data */ 6907 u8 *data, /* interrupt packet data */
6908 int len) /* interrput packet length */ 6908 int len) /* interrupt packet length */
6909{ 6909{
6910 if (len == 8 && data[4] == 1) { 6910 if (len == 8 && data[4] == 1) {
6911 input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); 6911 input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1);
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 77bbf7889659..78c9bc8e7f56 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -1039,7 +1039,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1039 /* Set the leds off */ 1039 /* Set the leds off */
1040 pwc_set_leds(pdev, 0, 0); 1040 pwc_set_leds(pdev, 0, 0);
1041 1041
1042 /* Setup intial videomode */ 1042 /* Setup initial videomode */
1043 rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT, 1043 rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT,
1044 V4L2_PIX_FMT_YUV420, 30, &compression, 1); 1044 V4L2_PIX_FMT_YUV420, 30, &compression, 1);
1045 if (rc) 1045 if (rc)
diff --git a/drivers/media/usb/usbtv/usbtv.c b/drivers/media/usb/usbtv/usbtv.c
index 8a505a90d318..6222a4ab1e00 100644
--- a/drivers/media/usb/usbtv/usbtv.c
+++ b/drivers/media/usb/usbtv/usbtv.c
@@ -50,13 +50,8 @@
50#define USBTV_ISOC_TRANSFERS 16 50#define USBTV_ISOC_TRANSFERS 16
51#define USBTV_ISOC_PACKETS 8 51#define USBTV_ISOC_PACKETS 8
52 52
53#define USBTV_WIDTH 720
54#define USBTV_HEIGHT 480
55
56#define USBTV_CHUNK_SIZE 256 53#define USBTV_CHUNK_SIZE 256
57#define USBTV_CHUNK 240 54#define USBTV_CHUNK 240
58#define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT \
59 / 4 / USBTV_CHUNK)
60 55
61/* Chunk header. */ 56/* Chunk header. */
62#define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \ 57#define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \
@@ -65,6 +60,27 @@
65#define USBTV_ODD(chunk) ((be32_to_cpu(chunk[0]) & 0x0000f000) >> 15) 60#define USBTV_ODD(chunk) ((be32_to_cpu(chunk[0]) & 0x0000f000) >> 15)
66#define USBTV_CHUNK_NO(chunk) (be32_to_cpu(chunk[0]) & 0x00000fff) 61#define USBTV_CHUNK_NO(chunk) (be32_to_cpu(chunk[0]) & 0x00000fff)
67 62
63#define USBTV_TV_STD (V4L2_STD_525_60 | V4L2_STD_PAL)
64
65/* parameters for supported TV norms */
66struct usbtv_norm_params {
67 v4l2_std_id norm;
68 int cap_width, cap_height;
69};
70
71static struct usbtv_norm_params norm_params[] = {
72 {
73 .norm = V4L2_STD_525_60,
74 .cap_width = 720,
75 .cap_height = 480,
76 },
77 {
78 .norm = V4L2_STD_PAL,
79 .cap_width = 720,
80 .cap_height = 576,
81 }
82};
83
68/* A single videobuf2 frame buffer. */ 84/* A single videobuf2 frame buffer. */
69struct usbtv_buf { 85struct usbtv_buf {
70 struct vb2_buffer vb; 86 struct vb2_buffer vb;
@@ -94,11 +110,38 @@ struct usbtv {
94 USBTV_COMPOSITE_INPUT, 110 USBTV_COMPOSITE_INPUT,
95 USBTV_SVIDEO_INPUT, 111 USBTV_SVIDEO_INPUT,
96 } input; 112 } input;
113 v4l2_std_id norm;
114 int width, height;
115 int n_chunks;
97 int iso_size; 116 int iso_size;
98 unsigned int sequence; 117 unsigned int sequence;
99 struct urb *isoc_urbs[USBTV_ISOC_TRANSFERS]; 118 struct urb *isoc_urbs[USBTV_ISOC_TRANSFERS];
100}; 119};
101 120
121static int usbtv_configure_for_norm(struct usbtv *usbtv, v4l2_std_id norm)
122{
123 int i, ret = 0;
124 struct usbtv_norm_params *params = NULL;
125
126 for (i = 0; i < ARRAY_SIZE(norm_params); i++) {
127 if (norm_params[i].norm & norm) {
128 params = &norm_params[i];
129 break;
130 }
131 }
132
133 if (params) {
134 usbtv->width = params->cap_width;
135 usbtv->height = params->cap_height;
136 usbtv->n_chunks = usbtv->width * usbtv->height
137 / 4 / USBTV_CHUNK;
138 usbtv->norm = params->norm;
139 } else
140 ret = -EINVAL;
141
142 return ret;
143}
144
102static int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size) 145static int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size)
103{ 146{
104 int ret; 147 int ret;
@@ -158,6 +201,57 @@ static int usbtv_select_input(struct usbtv *usbtv, int input)
158 return ret; 201 return ret;
159} 202}
160 203
204static int usbtv_select_norm(struct usbtv *usbtv, v4l2_std_id norm)
205{
206 int ret;
207 static const u16 pal[][2] = {
208 { USBTV_BASE + 0x001a, 0x0068 },
209 { USBTV_BASE + 0x010e, 0x0072 },
210 { USBTV_BASE + 0x010f, 0x00a2 },
211 { USBTV_BASE + 0x0112, 0x00b0 },
212 { USBTV_BASE + 0x0117, 0x0001 },
213 { USBTV_BASE + 0x0118, 0x002c },
214 { USBTV_BASE + 0x012d, 0x0010 },
215 { USBTV_BASE + 0x012f, 0x0020 },
216 { USBTV_BASE + 0x024f, 0x0002 },
217 { USBTV_BASE + 0x0254, 0x0059 },
218 { USBTV_BASE + 0x025a, 0x0016 },
219 { USBTV_BASE + 0x025b, 0x0035 },
220 { USBTV_BASE + 0x0263, 0x0017 },
221 { USBTV_BASE + 0x0266, 0x0016 },
222 { USBTV_BASE + 0x0267, 0x0036 }
223 };
224
225 static const u16 ntsc[][2] = {
226 { USBTV_BASE + 0x001a, 0x0079 },
227 { USBTV_BASE + 0x010e, 0x0068 },
228 { USBTV_BASE + 0x010f, 0x009c },
229 { USBTV_BASE + 0x0112, 0x00f0 },
230 { USBTV_BASE + 0x0117, 0x0000 },
231 { USBTV_BASE + 0x0118, 0x00fc },
232 { USBTV_BASE + 0x012d, 0x0004 },
233 { USBTV_BASE + 0x012f, 0x0008 },
234 { USBTV_BASE + 0x024f, 0x0001 },
235 { USBTV_BASE + 0x0254, 0x005f },
236 { USBTV_BASE + 0x025a, 0x0012 },
237 { USBTV_BASE + 0x025b, 0x0001 },
238 { USBTV_BASE + 0x0263, 0x001c },
239 { USBTV_BASE + 0x0266, 0x0011 },
240 { USBTV_BASE + 0x0267, 0x0005 }
241 };
242
243 ret = usbtv_configure_for_norm(usbtv, norm);
244
245 if (!ret) {
246 if (norm & V4L2_STD_525_60)
247 ret = usbtv_set_regs(usbtv, ntsc, ARRAY_SIZE(ntsc));
248 else if (norm & V4L2_STD_PAL)
249 ret = usbtv_set_regs(usbtv, pal, ARRAY_SIZE(pal));
250 }
251
252 return ret;
253}
254
161static int usbtv_setup_capture(struct usbtv *usbtv) 255static int usbtv_setup_capture(struct usbtv *usbtv)
162{ 256{
163 int ret; 257 int ret;
@@ -225,26 +319,11 @@ static int usbtv_setup_capture(struct usbtv *usbtv)
225 319
226 { USBTV_BASE + 0x0284, 0x0088 }, 320 { USBTV_BASE + 0x0284, 0x0088 },
227 { USBTV_BASE + 0x0003, 0x0004 }, 321 { USBTV_BASE + 0x0003, 0x0004 },
228 { USBTV_BASE + 0x001a, 0x0079 },
229 { USBTV_BASE + 0x0100, 0x00d3 }, 322 { USBTV_BASE + 0x0100, 0x00d3 },
230 { USBTV_BASE + 0x010e, 0x0068 },
231 { USBTV_BASE + 0x010f, 0x009c },
232 { USBTV_BASE + 0x0112, 0x00f0 },
233 { USBTV_BASE + 0x0115, 0x0015 }, 323 { USBTV_BASE + 0x0115, 0x0015 },
234 { USBTV_BASE + 0x0117, 0x0000 },
235 { USBTV_BASE + 0x0118, 0x00fc },
236 { USBTV_BASE + 0x012d, 0x0004 },
237 { USBTV_BASE + 0x012f, 0x0008 },
238 { USBTV_BASE + 0x0220, 0x002e }, 324 { USBTV_BASE + 0x0220, 0x002e },
239 { USBTV_BASE + 0x0225, 0x0008 }, 325 { USBTV_BASE + 0x0225, 0x0008 },
240 { USBTV_BASE + 0x024e, 0x0002 }, 326 { USBTV_BASE + 0x024e, 0x0002 },
241 { USBTV_BASE + 0x024f, 0x0001 },
242 { USBTV_BASE + 0x0254, 0x005f },
243 { USBTV_BASE + 0x025a, 0x0012 },
244 { USBTV_BASE + 0x025b, 0x0001 },
245 { USBTV_BASE + 0x0263, 0x001c },
246 { USBTV_BASE + 0x0266, 0x0011 },
247 { USBTV_BASE + 0x0267, 0x0005 },
248 { USBTV_BASE + 0x024e, 0x0002 }, 327 { USBTV_BASE + 0x024e, 0x0002 },
249 { USBTV_BASE + 0x024f, 0x0002 }, 328 { USBTV_BASE + 0x024f, 0x0002 },
250 }; 329 };
@@ -253,6 +332,10 @@ static int usbtv_setup_capture(struct usbtv *usbtv)
253 if (ret) 332 if (ret)
254 return ret; 333 return ret;
255 334
335 ret = usbtv_select_norm(usbtv, usbtv->norm);
336 if (ret)
337 return ret;
338
256 ret = usbtv_select_input(usbtv, usbtv->input); 339 ret = usbtv_select_input(usbtv, usbtv->input);
257 if (ret) 340 if (ret)
258 return ret; 341 return ret;
@@ -296,7 +379,7 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk)
296 frame_id = USBTV_FRAME_ID(chunk); 379 frame_id = USBTV_FRAME_ID(chunk);
297 odd = USBTV_ODD(chunk); 380 odd = USBTV_ODD(chunk);
298 chunk_no = USBTV_CHUNK_NO(chunk); 381 chunk_no = USBTV_CHUNK_NO(chunk);
299 if (chunk_no >= USBTV_CHUNKS) 382 if (chunk_no >= usbtv->n_chunks)
300 return; 383 return;
301 384
302 /* Beginning of a frame. */ 385 /* Beginning of a frame. */
@@ -324,10 +407,10 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk)
324 usbtv->chunks_done++; 407 usbtv->chunks_done++;
325 408
326 /* Last chunk in a frame, signalling an end */ 409 /* Last chunk in a frame, signalling an end */
327 if (odd && chunk_no == USBTV_CHUNKS-1) { 410 if (odd && chunk_no == usbtv->n_chunks-1) {
328 int size = vb2_plane_size(&buf->vb, 0); 411 int size = vb2_plane_size(&buf->vb, 0);
329 enum vb2_buffer_state state = usbtv->chunks_done == 412 enum vb2_buffer_state state = usbtv->chunks_done ==
330 USBTV_CHUNKS ? 413 usbtv->n_chunks ?
331 VB2_BUF_STATE_DONE : 414 VB2_BUF_STATE_DONE :
332 VB2_BUF_STATE_ERROR; 415 VB2_BUF_STATE_ERROR;
333 416
@@ -500,6 +583,8 @@ static int usbtv_querycap(struct file *file, void *priv,
500static int usbtv_enum_input(struct file *file, void *priv, 583static int usbtv_enum_input(struct file *file, void *priv,
501 struct v4l2_input *i) 584 struct v4l2_input *i)
502{ 585{
586 struct usbtv *dev = video_drvdata(file);
587
503 switch (i->index) { 588 switch (i->index) {
504 case USBTV_COMPOSITE_INPUT: 589 case USBTV_COMPOSITE_INPUT:
505 strlcpy(i->name, "Composite", sizeof(i->name)); 590 strlcpy(i->name, "Composite", sizeof(i->name));
@@ -512,7 +597,7 @@ static int usbtv_enum_input(struct file *file, void *priv,
512 } 597 }
513 598
514 i->type = V4L2_INPUT_TYPE_CAMERA; 599 i->type = V4L2_INPUT_TYPE_CAMERA;
515 i->std = V4L2_STD_525_60; 600 i->std = dev->vdev.tvnorms;
516 return 0; 601 return 0;
517} 602}
518 603
@@ -531,23 +616,37 @@ static int usbtv_enum_fmt_vid_cap(struct file *file, void *priv,
531static int usbtv_fmt_vid_cap(struct file *file, void *priv, 616static int usbtv_fmt_vid_cap(struct file *file, void *priv,
532 struct v4l2_format *f) 617 struct v4l2_format *f)
533{ 618{
534 f->fmt.pix.width = USBTV_WIDTH; 619 struct usbtv *usbtv = video_drvdata(file);
535 f->fmt.pix.height = USBTV_HEIGHT; 620
621 f->fmt.pix.width = usbtv->width;
622 f->fmt.pix.height = usbtv->height;
536 f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV; 623 f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
537 f->fmt.pix.field = V4L2_FIELD_INTERLACED; 624 f->fmt.pix.field = V4L2_FIELD_INTERLACED;
538 f->fmt.pix.bytesperline = USBTV_WIDTH * 2; 625 f->fmt.pix.bytesperline = usbtv->width * 2;
539 f->fmt.pix.sizeimage = (f->fmt.pix.bytesperline * f->fmt.pix.height); 626 f->fmt.pix.sizeimage = (f->fmt.pix.bytesperline * f->fmt.pix.height);
540 f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; 627 f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
541 f->fmt.pix.priv = 0; 628
542 return 0; 629 return 0;
543} 630}
544 631
545static int usbtv_g_std(struct file *file, void *priv, v4l2_std_id *norm) 632static int usbtv_g_std(struct file *file, void *priv, v4l2_std_id *norm)
546{ 633{
547 *norm = V4L2_STD_525_60; 634 struct usbtv *usbtv = video_drvdata(file);
635 *norm = usbtv->norm;
548 return 0; 636 return 0;
549} 637}
550 638
639static int usbtv_s_std(struct file *file, void *priv, v4l2_std_id norm)
640{
641 int ret = -EINVAL;
642 struct usbtv *usbtv = video_drvdata(file);
643
644 if ((norm & V4L2_STD_525_60) || (norm & V4L2_STD_PAL))
645 ret = usbtv_select_norm(usbtv, norm);
646
647 return ret;
648}
649
551static int usbtv_g_input(struct file *file, void *priv, unsigned int *i) 650static int usbtv_g_input(struct file *file, void *priv, unsigned int *i)
552{ 651{
553 struct usbtv *usbtv = video_drvdata(file); 652 struct usbtv *usbtv = video_drvdata(file);
@@ -561,13 +660,6 @@ static int usbtv_s_input(struct file *file, void *priv, unsigned int i)
561 return usbtv_select_input(usbtv, i); 660 return usbtv_select_input(usbtv, i);
562} 661}
563 662
564static int usbtv_s_std(struct file *file, void *priv, v4l2_std_id norm)
565{
566 if (norm & V4L2_STD_525_60)
567 return 0;
568 return -EINVAL;
569}
570
571struct v4l2_ioctl_ops usbtv_ioctl_ops = { 663struct v4l2_ioctl_ops usbtv_ioctl_ops = {
572 .vidioc_querycap = usbtv_querycap, 664 .vidioc_querycap = usbtv_querycap,
573 .vidioc_enum_input = usbtv_enum_input, 665 .vidioc_enum_input = usbtv_enum_input,
@@ -604,10 +696,12 @@ static int usbtv_queue_setup(struct vb2_queue *vq,
604 const struct v4l2_format *v4l_fmt, unsigned int *nbuffers, 696 const struct v4l2_format *v4l_fmt, unsigned int *nbuffers,
605 unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) 697 unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[])
606{ 698{
699 struct usbtv *usbtv = vb2_get_drv_priv(vq);
700
607 if (*nbuffers < 2) 701 if (*nbuffers < 2)
608 *nbuffers = 2; 702 *nbuffers = 2;
609 *nplanes = 1; 703 *nplanes = 1;
610 sizes[0] = USBTV_WIDTH * USBTV_HEIGHT / 2 * sizeof(u32); 704 sizes[0] = USBTV_CHUNK * usbtv->n_chunks * 2 * sizeof(u32);
611 705
612 return 0; 706 return 0;
613} 707}
@@ -690,7 +784,11 @@ static int usbtv_probe(struct usb_interface *intf,
690 return -ENOMEM; 784 return -ENOMEM;
691 usbtv->dev = dev; 785 usbtv->dev = dev;
692 usbtv->udev = usb_get_dev(interface_to_usbdev(intf)); 786 usbtv->udev = usb_get_dev(interface_to_usbdev(intf));
787
693 usbtv->iso_size = size; 788 usbtv->iso_size = size;
789
790 (void)usbtv_configure_for_norm(usbtv, V4L2_STD_525_60);
791
694 spin_lock_init(&usbtv->buflock); 792 spin_lock_init(&usbtv->buflock);
695 mutex_init(&usbtv->v4l2_lock); 793 mutex_init(&usbtv->v4l2_lock);
696 mutex_init(&usbtv->vb2q_lock); 794 mutex_init(&usbtv->vb2q_lock);
@@ -727,7 +825,7 @@ static int usbtv_probe(struct usb_interface *intf,
727 usbtv->vdev.release = video_device_release_empty; 825 usbtv->vdev.release = video_device_release_empty;
728 usbtv->vdev.fops = &usbtv_fops; 826 usbtv->vdev.fops = &usbtv_fops;
729 usbtv->vdev.ioctl_ops = &usbtv_ioctl_ops; 827 usbtv->vdev.ioctl_ops = &usbtv_ioctl_ops;
730 usbtv->vdev.tvnorms = V4L2_STD_525_60; 828 usbtv->vdev.tvnorms = USBTV_TV_STD;
731 usbtv->vdev.queue = &usbtv->vb2q; 829 usbtv->vdev.queue = &usbtv->vb2q;
732 usbtv->vdev.lock = &usbtv->v4l2_lock; 830 usbtv->vdev.lock = &usbtv->v4l2_lock;
733 set_bit(V4L2_FL_USE_FH_PRIO, &usbtv->vdev.flags); 831 set_bit(V4L2_FL_USE_FH_PRIO, &usbtv->vdev.flags);
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 899cb6d1c4a4..898c208889cd 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -556,7 +556,7 @@ static u16 uvc_video_clock_host_sof(const struct uvc_clock_sample *sample)
556 * 556 *
557 * SOF = ((SOF2 - SOF1) * PTS + SOF1 * STC2 - SOF2 * STC1) / (STC2 - STC1) (1) 557 * SOF = ((SOF2 - SOF1) * PTS + SOF1 * STC2 - SOF2 * STC1) / (STC2 - STC1) (1)
558 * 558 *
559 * to avoid loosing precision in the division. Similarly, the host timestamp is 559 * to avoid losing precision in the division. Similarly, the host timestamp is
560 * computed with 560 * computed with
561 * 561 *
562 * TS = ((TS2 - TS1) * PTS + TS1 * SOF2 - TS2 * SOF1) / (SOF2 - SOF1) (2) 562 * TS = ((TS2 - TS1) * PTS + TS1 * SOF2 - TS2 * SOF1) / (SOF2 - SOF1) (2)
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 60dcc0f3b32e..fb46790d0eca 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -420,7 +420,7 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
420 "Advanced Simple", 420 "Advanced Simple",
421 "Core", 421 "Core",
422 "Simple Scalable", 422 "Simple Scalable",
423 "Advanced Coding Efficency", 423 "Advanced Coding Efficiency",
424 NULL, 424 NULL,
425 }; 425 };
426 426
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index b19b306c8f7f..0edc165f418d 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -145,6 +145,25 @@ static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
145} 145}
146 146
147/** 147/**
148 * __setup_lengths() - setup initial lengths for every plane in
149 * every buffer on the queue
150 */
151static void __setup_lengths(struct vb2_queue *q, unsigned int n)
152{
153 unsigned int buffer, plane;
154 struct vb2_buffer *vb;
155
156 for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
157 vb = q->bufs[buffer];
158 if (!vb)
159 continue;
160
161 for (plane = 0; plane < vb->num_planes; ++plane)
162 vb->v4l2_planes[plane].length = q->plane_sizes[plane];
163 }
164}
165
166/**
148 * __setup_offsets() - setup unique offsets ("cookies") for every plane in 167 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
149 * every buffer on the queue 168 * every buffer on the queue
150 */ 169 */
@@ -169,7 +188,6 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n)
169 continue; 188 continue;
170 189
171 for (plane = 0; plane < vb->num_planes; ++plane) { 190 for (plane = 0; plane < vb->num_planes; ++plane) {
172 vb->v4l2_planes[plane].length = q->plane_sizes[plane];
173 vb->v4l2_planes[plane].m.mem_offset = off; 191 vb->v4l2_planes[plane].m.mem_offset = off;
174 192
175 dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n", 193 dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
@@ -241,6 +259,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
241 q->bufs[q->num_buffers + buffer] = vb; 259 q->bufs[q->num_buffers + buffer] = vb;
242 } 260 }
243 261
262 __setup_lengths(q, buffer);
244 if (memory == V4L2_MEMORY_MMAP) 263 if (memory == V4L2_MEMORY_MMAP)
245 __setup_offsets(q, buffer); 264 __setup_offsets(q, buffer);
246 265
@@ -1824,8 +1843,8 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
1824 return -EINVAL; 1843 return -EINVAL;
1825 } 1844 }
1826 1845
1827 if (eb->flags & ~O_CLOEXEC) { 1846 if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) {
1828 dprintk(1, "Queue does support only O_CLOEXEC flag\n"); 1847 dprintk(1, "Queue does support only O_CLOEXEC and access mode flags\n");
1829 return -EINVAL; 1848 return -EINVAL;
1830 } 1849 }
1831 1850
@@ -1848,14 +1867,14 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
1848 1867
1849 vb_plane = &vb->planes[eb->plane]; 1868 vb_plane = &vb->planes[eb->plane];
1850 1869
1851 dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv); 1870 dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
1852 if (IS_ERR_OR_NULL(dbuf)) { 1871 if (IS_ERR_OR_NULL(dbuf)) {
1853 dprintk(1, "Failed to export buffer %d, plane %d\n", 1872 dprintk(1, "Failed to export buffer %d, plane %d\n",
1854 eb->index, eb->plane); 1873 eb->index, eb->plane);
1855 return -EINVAL; 1874 return -EINVAL;
1856 } 1875 }
1857 1876
1858 ret = dma_buf_fd(dbuf, eb->flags); 1877 ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE);
1859 if (ret < 0) { 1878 if (ret < 0) {
1860 dprintk(3, "buffer %d, plane %d failed to export (%d)\n", 1879 dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
1861 eb->index, eb->plane, ret); 1880 eb->index, eb->plane, ret);
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 646f08f4f504..33d3871d1e13 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -393,7 +393,7 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
393 return sgt; 393 return sgt;
394} 394}
395 395
396static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv) 396static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
397{ 397{
398 struct vb2_dc_buf *buf = buf_priv; 398 struct vb2_dc_buf *buf = buf_priv;
399 struct dma_buf *dbuf; 399 struct dma_buf *dbuf;
@@ -404,7 +404,7 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
404 if (WARN_ON(!buf->sgt_base)) 404 if (WARN_ON(!buf->sgt_base))
405 return NULL; 405 return NULL;
406 406
407 dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0); 407 dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags);
408 if (IS_ERR(dbuf)) 408 if (IS_ERR(dbuf))
409 return NULL; 409 return NULL;
410 410
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 2f860543912c..0d3a8ffe47a3 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -178,7 +178,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
178 buf->pages = kzalloc(buf->num_pages * sizeof(struct page *), 178 buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
179 GFP_KERNEL); 179 GFP_KERNEL);
180 if (!buf->pages) 180 if (!buf->pages)
181 return NULL; 181 goto userptr_fail_alloc_pages;
182 182
183 num_pages_from_user = get_user_pages(current, current->mm, 183 num_pages_from_user = get_user_pages(current, current->mm,
184 vaddr & PAGE_MASK, 184 vaddr & PAGE_MASK,
@@ -204,6 +204,7 @@ userptr_fail_get_user_pages:
204 while (--num_pages_from_user >= 0) 204 while (--num_pages_from_user >= 0)
205 put_page(buf->pages[num_pages_from_user]); 205 put_page(buf->pages[num_pages_from_user]);
206 kfree(buf->pages); 206 kfree(buf->pages);
207userptr_fail_alloc_pages:
207 kfree(buf); 208 kfree(buf);
208 return NULL; 209 return NULL;
209} 210}
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 62a60caa5d1f..dd671582c9a1 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -32,7 +32,7 @@ config MFD_AS3722
32 select MFD_CORE 32 select MFD_CORE
33 select REGMAP_I2C 33 select REGMAP_I2C
34 select REGMAP_IRQ 34 select REGMAP_IRQ
35 depends on I2C && OF 35 depends on I2C=y && OF
36 help 36 help
37 The ams AS3722 is a compact system PMU suitable for mobile phones, 37 The ams AS3722 is a compact system PMU suitable for mobile phones,
38 tablets etc. It has 4 DC/DC step-down regulators, 3 DC/DC step-down 38 tablets etc. It has 4 DC/DC step-down regulators, 3 DC/DC step-down
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index da1c6566d93d..37edf9e989b0 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -506,7 +506,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
506 .iTCO_version = 2, 506 .iTCO_version = 2,
507 }, 507 },
508 [LPC_WPT_LP] = { 508 [LPC_WPT_LP] = {
509 .name = "Lynx Point_LP", 509 .name = "Wildcat Point_LP",
510 .iTCO_version = 2, 510 .iTCO_version = 2,
511 }, 511 },
512}; 512};
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 34c18fb8c089..54cc25546592 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -81,31 +81,31 @@ static struct of_device_id sec_dt_match[] = {
81 81
82int sec_reg_read(struct sec_pmic_dev *sec_pmic, u8 reg, void *dest) 82int sec_reg_read(struct sec_pmic_dev *sec_pmic, u8 reg, void *dest)
83{ 83{
84 return regmap_read(sec_pmic->regmap, reg, dest); 84 return regmap_read(sec_pmic->regmap_pmic, reg, dest);
85} 85}
86EXPORT_SYMBOL_GPL(sec_reg_read); 86EXPORT_SYMBOL_GPL(sec_reg_read);
87 87
88int sec_bulk_read(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf) 88int sec_bulk_read(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf)
89{ 89{
90 return regmap_bulk_read(sec_pmic->regmap, reg, buf, count); 90 return regmap_bulk_read(sec_pmic->regmap_pmic, reg, buf, count);
91} 91}
92EXPORT_SYMBOL_GPL(sec_bulk_read); 92EXPORT_SYMBOL_GPL(sec_bulk_read);
93 93
94int sec_reg_write(struct sec_pmic_dev *sec_pmic, u8 reg, u8 value) 94int sec_reg_write(struct sec_pmic_dev *sec_pmic, u8 reg, u8 value)
95{ 95{
96 return regmap_write(sec_pmic->regmap, reg, value); 96 return regmap_write(sec_pmic->regmap_pmic, reg, value);
97} 97}
98EXPORT_SYMBOL_GPL(sec_reg_write); 98EXPORT_SYMBOL_GPL(sec_reg_write);
99 99
100int sec_bulk_write(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf) 100int sec_bulk_write(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf)
101{ 101{
102 return regmap_raw_write(sec_pmic->regmap, reg, buf, count); 102 return regmap_raw_write(sec_pmic->regmap_pmic, reg, buf, count);
103} 103}
104EXPORT_SYMBOL_GPL(sec_bulk_write); 104EXPORT_SYMBOL_GPL(sec_bulk_write);
105 105
106int sec_reg_update(struct sec_pmic_dev *sec_pmic, u8 reg, u8 val, u8 mask) 106int sec_reg_update(struct sec_pmic_dev *sec_pmic, u8 reg, u8 val, u8 mask)
107{ 107{
108 return regmap_update_bits(sec_pmic->regmap, reg, mask, val); 108 return regmap_update_bits(sec_pmic->regmap_pmic, reg, mask, val);
109} 109}
110EXPORT_SYMBOL_GPL(sec_reg_update); 110EXPORT_SYMBOL_GPL(sec_reg_update);
111 111
@@ -166,6 +166,11 @@ static struct regmap_config s5m8767_regmap_config = {
166 .cache_type = REGCACHE_FLAT, 166 .cache_type = REGCACHE_FLAT,
167}; 167};
168 168
169static const struct regmap_config sec_rtc_regmap_config = {
170 .reg_bits = 8,
171 .val_bits = 8,
172};
173
169#ifdef CONFIG_OF 174#ifdef CONFIG_OF
170/* 175/*
171 * Only the common platform data elements for s5m8767 are parsed here from the 176 * Only the common platform data elements for s5m8767 are parsed here from the
@@ -266,9 +271,9 @@ static int sec_pmic_probe(struct i2c_client *i2c,
266 break; 271 break;
267 } 272 }
268 273
269 sec_pmic->regmap = devm_regmap_init_i2c(i2c, regmap); 274 sec_pmic->regmap_pmic = devm_regmap_init_i2c(i2c, regmap);
270 if (IS_ERR(sec_pmic->regmap)) { 275 if (IS_ERR(sec_pmic->regmap_pmic)) {
271 ret = PTR_ERR(sec_pmic->regmap); 276 ret = PTR_ERR(sec_pmic->regmap_pmic);
272 dev_err(&i2c->dev, "Failed to allocate register map: %d\n", 277 dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
273 ret); 278 ret);
274 return ret; 279 return ret;
@@ -277,6 +282,15 @@ static int sec_pmic_probe(struct i2c_client *i2c,
277 sec_pmic->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR); 282 sec_pmic->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
278 i2c_set_clientdata(sec_pmic->rtc, sec_pmic); 283 i2c_set_clientdata(sec_pmic->rtc, sec_pmic);
279 284
285 sec_pmic->regmap_rtc = devm_regmap_init_i2c(sec_pmic->rtc,
286 &sec_rtc_regmap_config);
287 if (IS_ERR(sec_pmic->regmap_rtc)) {
288 ret = PTR_ERR(sec_pmic->regmap_rtc);
289 dev_err(&i2c->dev, "Failed to allocate RTC register map: %d\n",
290 ret);
291 return ret;
292 }
293
280 if (pdata && pdata->cfg_pmic_irq) 294 if (pdata && pdata->cfg_pmic_irq)
281 pdata->cfg_pmic_irq(); 295 pdata->cfg_pmic_irq();
282 296
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index 0dd84e99081e..b441b1be27cb 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -280,19 +280,19 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
280 280
281 switch (type) { 281 switch (type) {
282 case S5M8763X: 282 case S5M8763X:
283 ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq, 283 ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
284 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 284 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
285 sec_pmic->irq_base, &s5m8763_irq_chip, 285 sec_pmic->irq_base, &s5m8763_irq_chip,
286 &sec_pmic->irq_data); 286 &sec_pmic->irq_data);
287 break; 287 break;
288 case S5M8767X: 288 case S5M8767X:
289 ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq, 289 ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
290 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 290 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
291 sec_pmic->irq_base, &s5m8767_irq_chip, 291 sec_pmic->irq_base, &s5m8767_irq_chip,
292 &sec_pmic->irq_data); 292 &sec_pmic->irq_data);
293 break; 293 break;
294 case S2MPS11X: 294 case S2MPS11X:
295 ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq, 295 ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
296 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 296 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
297 sec_pmic->irq_base, &s2mps11_irq_chip, 297 sec_pmic->irq_base, &s2mps11_irq_chip,
298 &sec_pmic->irq_data); 298 &sec_pmic->irq_data);
diff --git a/drivers/mfd/ti-ssp.c b/drivers/mfd/ti-ssp.c
index 71e3e0c5bf73..a5424579679c 100644
--- a/drivers/mfd/ti-ssp.c
+++ b/drivers/mfd/ti-ssp.c
@@ -32,6 +32,7 @@
32#include <linux/platform_device.h> 32#include <linux/platform_device.h>
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/io.h> 34#include <linux/io.h>
35#include <linux/sched.h>
35#include <linux/mfd/core.h> 36#include <linux/mfd/core.h>
36#include <linux/mfd/ti_ssp.h> 37#include <linux/mfd/ti_ssp.h>
37 38
@@ -409,7 +410,6 @@ static int ti_ssp_probe(struct platform_device *pdev)
409 cells[id].id = id; 410 cells[id].id = id;
410 cells[id].name = data->dev_name; 411 cells[id].name = data->dev_name;
411 cells[id].platform_data = data->pdata; 412 cells[id].platform_data = data->pdata;
412 cells[id].data_size = data->pdata_size;
413 } 413 }
414 414
415 error = mfd_add_devices(dev, 0, cells, 2, NULL, 0, NULL); 415 error = mfd_add_devices(dev, 0, cells, 2, NULL, 0, NULL);
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 08b18f3f5264..9e2b985293fc 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -633,8 +633,7 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
633 struct dma_async_tx_descriptor *tx; 633 struct dma_async_tx_descriptor *tx;
634 dma_cookie_t cookie; 634 dma_cookie_t cookie;
635 dma_addr_t dst, src; 635 dma_addr_t dst, src;
636 unsigned long dma_flags = DMA_COMPL_SKIP_DEST_UNMAP | 636 unsigned long dma_flags = 0;
637 DMA_COMPL_SKIP_SRC_UNMAP;
638 637
639 dst_sg = buf->vb.sglist; 638 dst_sg = buf->vb.sglist;
640 dst_nents = buf->vb.sglen; 639 dst_nents = buf->vb.sglen;
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 0e8df41aaf14..2cf2bbc0b927 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -198,6 +198,13 @@ static void enclosure_remove_links(struct enclosure_component *cdev)
198{ 198{
199 char name[ENCLOSURE_NAME_SIZE]; 199 char name[ENCLOSURE_NAME_SIZE];
200 200
201 /*
202 * In odd circumstances, like multipath devices, something else may
203 * already have removed the links, so check for this condition first.
204 */
205 if (!cdev->dev->kobj.sd)
206 return;
207
201 enclosure_link_name(cdev, name); 208 enclosure_link_name(cdev, name);
202 sysfs_remove_link(&cdev->dev->kobj, name); 209 sysfs_remove_link(&cdev->dev->kobj, name);
203 sysfs_remove_link(&cdev->cdev.kobj, "device"); 210 sysfs_remove_link(&cdev->cdev.kobj, "device");
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 6c0fde55270d..66f411a6e8ea 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -109,9 +109,12 @@
109#define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */ 109#define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */
110#define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */ 110#define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */
111 111
112#define MEI_DEV_ID_LPT 0x8C3A /* Lynx Point */ 112#define MEI_DEV_ID_LPT_H 0x8C3A /* Lynx Point H */
113#define MEI_DEV_ID_LPT_W 0x8D3A /* Lynx Point - Wellsburg */ 113#define MEI_DEV_ID_LPT_W 0x8D3A /* Lynx Point - Wellsburg */
114#define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */ 114#define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */
115#define MEI_DEV_ID_LPT_HR 0x8CBA /* Lynx Point H Refresh */
116
117#define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */
115/* 118/*
116 * MEI HW Section 119 * MEI HW Section
117 */ 120 */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index b96205aece0c..2cab3c0a6805 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -76,9 +76,11 @@ static DEFINE_PCI_DEVICE_TABLE(mei_me_pci_tbl) = {
76 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)}, 76 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
77 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)}, 77 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
78 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)}, 78 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
79 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)}, 79 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_H)},
80 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_W)}, 80 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_W)},
81 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)}, 81 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
82 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_HR)},
83 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_WPT_LP)},
82 84
83 /* required last entry */ 85 /* required last entry */
84 {0, } 86 {0, }
diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c
index 8aa42e738acc..653799b96bfa 100644
--- a/drivers/misc/mic/card/mic_virtio.c
+++ b/drivers/misc/mic/card/mic_virtio.c
@@ -154,14 +154,14 @@ static void mic_reset_inform_host(struct virtio_device *vdev)
154{ 154{
155 struct mic_vdev *mvdev = to_micvdev(vdev); 155 struct mic_vdev *mvdev = to_micvdev(vdev);
156 struct mic_device_ctrl __iomem *dc = mvdev->dc; 156 struct mic_device_ctrl __iomem *dc = mvdev->dc;
157 int retry = 100, i; 157 int retry;
158 158
159 iowrite8(0, &dc->host_ack); 159 iowrite8(0, &dc->host_ack);
160 iowrite8(1, &dc->vdev_reset); 160 iowrite8(1, &dc->vdev_reset);
161 mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); 161 mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
162 162
163 /* Wait till host completes all card accesses and acks the reset */ 163 /* Wait till host completes all card accesses and acks the reset */
164 for (i = retry; i--;) { 164 for (retry = 100; retry--;) {
165 if (ioread8(&dc->host_ack)) 165 if (ioread8(&dc->host_ack))
166 break; 166 break;
167 msleep(100); 167 msleep(100);
@@ -187,11 +187,12 @@ static void mic_reset(struct virtio_device *vdev)
187/* 187/*
188 * The virtio_ring code calls this API when it wants to notify the Host. 188 * The virtio_ring code calls this API when it wants to notify the Host.
189 */ 189 */
190static void mic_notify(struct virtqueue *vq) 190static bool mic_notify(struct virtqueue *vq)
191{ 191{
192 struct mic_vdev *mvdev = vq->priv; 192 struct mic_vdev *mvdev = vq->priv;
193 193
194 mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); 194 mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
195 return true;
195} 196}
196 197
197static void mic_del_vq(struct virtqueue *vq, int n) 198static void mic_del_vq(struct virtqueue *vq, int n)
@@ -247,17 +248,17 @@ static struct virtqueue *mic_find_vq(struct virtio_device *vdev,
247 /* First assign the vring's allocated in host memory */ 248 /* First assign the vring's allocated in host memory */
248 vqconfig = mic_vq_config(mvdev->desc) + index; 249 vqconfig = mic_vq_config(mvdev->desc) + index;
249 memcpy_fromio(&config, vqconfig, sizeof(config)); 250 memcpy_fromio(&config, vqconfig, sizeof(config));
250 _vr_size = vring_size(config.num, MIC_VIRTIO_RING_ALIGN); 251 _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
251 vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info)); 252 vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
252 va = mic_card_map(mvdev->mdev, config.address, vr_size); 253 va = mic_card_map(mvdev->mdev, le64_to_cpu(config.address), vr_size);
253 if (!va) 254 if (!va)
254 return ERR_PTR(-ENOMEM); 255 return ERR_PTR(-ENOMEM);
255 mvdev->vr[index] = va; 256 mvdev->vr[index] = va;
256 memset_io(va, 0x0, _vr_size); 257 memset_io(va, 0x0, _vr_size);
257 vq = vring_new_virtqueue(index, 258 vq = vring_new_virtqueue(index, le16_to_cpu(config.num),
258 config.num, MIC_VIRTIO_RING_ALIGN, vdev, 259 MIC_VIRTIO_RING_ALIGN, vdev, false,
259 false, 260 (void __force *)va, mic_notify, callback,
260 va, mic_notify, callback, name); 261 name);
261 if (!vq) { 262 if (!vq) {
262 err = -ENOMEM; 263 err = -ENOMEM;
263 goto unmap; 264 goto unmap;
@@ -272,7 +273,8 @@ static struct virtqueue *mic_find_vq(struct virtio_device *vdev,
272 273
273 /* Allocate and reassign used ring now */ 274 /* Allocate and reassign used ring now */
274 mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + 275 mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
275 sizeof(struct vring_used_elem) * config.num); 276 sizeof(struct vring_used_elem) *
277 le16_to_cpu(config.num));
276 used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 278 used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
277 get_order(mvdev->used_size[index])); 279 get_order(mvdev->used_size[index]));
278 if (!used) { 280 if (!used) {
@@ -309,7 +311,7 @@ static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs,
309{ 311{
310 struct mic_vdev *mvdev = to_micvdev(vdev); 312 struct mic_vdev *mvdev = to_micvdev(vdev);
311 struct mic_device_ctrl __iomem *dc = mvdev->dc; 313 struct mic_device_ctrl __iomem *dc = mvdev->dc;
312 int i, err, retry = 100; 314 int i, err, retry;
313 315
314 /* We must have this many virtqueues. */ 316 /* We must have this many virtqueues. */
315 if (nvqs > ioread8(&mvdev->desc->num_vq)) 317 if (nvqs > ioread8(&mvdev->desc->num_vq))
@@ -331,7 +333,7 @@ static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs,
331 * rings have been re-assigned. 333 * rings have been re-assigned.
332 */ 334 */
333 mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); 335 mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
334 for (i = retry; i--;) { 336 for (retry = 100; retry--;) {
335 if (!ioread8(&dc->used_address_updated)) 337 if (!ioread8(&dc->used_address_updated))
336 break; 338 break;
337 msleep(100); 339 msleep(100);
@@ -519,8 +521,8 @@ static void mic_scan_devices(struct mic_driver *mdrv, bool remove)
519 struct device *dev; 521 struct device *dev;
520 int ret; 522 int ret;
521 523
522 for (i = mic_aligned_size(struct mic_bootparam); 524 for (i = sizeof(struct mic_bootparam); i < MIC_DP_SIZE;
523 i < MIC_DP_SIZE; i += mic_total_desc_size(d)) { 525 i += mic_total_desc_size(d)) {
524 d = mdrv->dp + i; 526 d = mdrv->dp + i;
525 dc = (void __iomem *)d + mic_aligned_desc_size(d); 527 dc = (void __iomem *)d + mic_aligned_desc_size(d);
526 /* 528 /*
@@ -539,7 +541,8 @@ static void mic_scan_devices(struct mic_driver *mdrv, bool remove)
539 continue; 541 continue;
540 542
541 /* device already exists */ 543 /* device already exists */
542 dev = device_find_child(mdrv->dev, d, mic_match_desc); 544 dev = device_find_child(mdrv->dev, (void __force *)d,
545 mic_match_desc);
543 if (dev) { 546 if (dev) {
544 if (remove) 547 if (remove)
545 iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE, 548 iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
diff --git a/drivers/misc/mic/card/mic_virtio.h b/drivers/misc/mic/card/mic_virtio.h
index 2c5c22c93ba8..d0407ba53bb7 100644
--- a/drivers/misc/mic/card/mic_virtio.h
+++ b/drivers/misc/mic/card/mic_virtio.h
@@ -42,8 +42,8 @@
42 42
43static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc) 43static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc)
44{ 44{
45 return mic_aligned_size(*desc) 45 return sizeof(*desc)
46 + ioread8(&desc->num_vq) * mic_aligned_size(struct mic_vqconfig) 46 + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
47 + ioread8(&desc->feature_len) * 2 47 + ioread8(&desc->feature_len) * 2
48 + ioread8(&desc->config_len); 48 + ioread8(&desc->config_len);
49} 49}
@@ -67,8 +67,7 @@ mic_vq_configspace(struct mic_device_desc __iomem *desc)
67} 67}
68static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc) 68static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc)
69{ 69{
70 return mic_aligned_desc_size(desc) + 70 return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
71 mic_aligned_size(struct mic_device_ctrl);
72} 71}
73 72
74int mic_devices_init(struct mic_driver *mdrv); 73int mic_devices_init(struct mic_driver *mdrv);
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index 7558d9186438..b75c6b5cc20f 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -62,7 +62,7 @@ void mic_bootparam_init(struct mic_device *mdev)
62{ 62{
63 struct mic_bootparam *bootparam = mdev->dp; 63 struct mic_bootparam *bootparam = mdev->dp;
64 64
65 bootparam->magic = MIC_MAGIC; 65 bootparam->magic = cpu_to_le32(MIC_MAGIC);
66 bootparam->c2h_shutdown_db = mdev->shutdown_db; 66 bootparam->c2h_shutdown_db = mdev->shutdown_db;
67 bootparam->h2c_shutdown_db = -1; 67 bootparam->h2c_shutdown_db = -1;
68 bootparam->h2c_config_db = -1; 68 bootparam->h2c_config_db = -1;
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
index 5b8494bd1e00..e04bb4fe6823 100644
--- a/drivers/misc/mic/host/mic_virtio.c
+++ b/drivers/misc/mic/host/mic_virtio.c
@@ -41,7 +41,7 @@ static int mic_virtio_copy_to_user(struct mic_vdev *mvdev,
41 * We are copying from IO below an should ideally use something 41 * We are copying from IO below an should ideally use something
42 * like copy_to_user_fromio(..) if it existed. 42 * like copy_to_user_fromio(..) if it existed.
43 */ 43 */
44 if (copy_to_user(ubuf, dbuf, len)) { 44 if (copy_to_user(ubuf, (void __force *)dbuf, len)) {
45 err = -EFAULT; 45 err = -EFAULT;
46 dev_err(mic_dev(mvdev), "%s %d err %d\n", 46 dev_err(mic_dev(mvdev), "%s %d err %d\n",
47 __func__, __LINE__, err); 47 __func__, __LINE__, err);
@@ -66,7 +66,7 @@ static int mic_virtio_copy_from_user(struct mic_vdev *mvdev,
66 * We are copying to IO below and should ideally use something 66 * We are copying to IO below and should ideally use something
67 * like copy_from_user_toio(..) if it existed. 67 * like copy_from_user_toio(..) if it existed.
68 */ 68 */
69 if (copy_from_user(dbuf, ubuf, len)) { 69 if (copy_from_user((void __force *)dbuf, ubuf, len)) {
70 err = -EFAULT; 70 err = -EFAULT;
71 dev_err(mic_dev(mvdev), "%s %d err %d\n", 71 dev_err(mic_dev(mvdev), "%s %d err %d\n",
72 __func__, __LINE__, err); 72 __func__, __LINE__, err);
@@ -293,7 +293,7 @@ static void mic_virtio_init_post(struct mic_vdev *mvdev)
293 continue; 293 continue;
294 } 294 }
295 mvdev->mvr[i].vrh.vring.used = 295 mvdev->mvr[i].vrh.vring.used =
296 mvdev->mdev->aper.va + 296 (void __force *)mvdev->mdev->aper.va +
297 le64_to_cpu(vqconfig[i].used_address); 297 le64_to_cpu(vqconfig[i].used_address);
298 } 298 }
299 299
@@ -378,7 +378,7 @@ int mic_virtio_config_change(struct mic_vdev *mvdev,
378 void __user *argp) 378 void __user *argp)
379{ 379{
380 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); 380 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
381 int ret = 0, retry = 100, i; 381 int ret = 0, retry, i;
382 struct mic_bootparam *bootparam = mvdev->mdev->dp; 382 struct mic_bootparam *bootparam = mvdev->mdev->dp;
383 s8 db = bootparam->h2c_config_db; 383 s8 db = bootparam->h2c_config_db;
384 384
@@ -401,7 +401,7 @@ int mic_virtio_config_change(struct mic_vdev *mvdev,
401 mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED; 401 mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
402 mvdev->mdev->ops->send_intr(mvdev->mdev, db); 402 mvdev->mdev->ops->send_intr(mvdev->mdev, db);
403 403
404 for (i = retry; i--;) { 404 for (retry = 100; retry--;) {
405 ret = wait_event_timeout(wake, 405 ret = wait_event_timeout(wake,
406 mvdev->dc->guest_ack, msecs_to_jiffies(100)); 406 mvdev->dc->guest_ack, msecs_to_jiffies(100));
407 if (ret) 407 if (ret)
@@ -467,7 +467,7 @@ static int mic_copy_dp_entry(struct mic_vdev *mvdev,
467 } 467 }
468 468
469 /* Find the first free device page entry */ 469 /* Find the first free device page entry */
470 for (i = mic_aligned_size(struct mic_bootparam); 470 for (i = sizeof(struct mic_bootparam);
471 i < MIC_DP_SIZE - mic_total_desc_size(dd_config); 471 i < MIC_DP_SIZE - mic_total_desc_size(dd_config);
472 i += mic_total_desc_size(devp)) { 472 i += mic_total_desc_size(devp)) {
473 devp = mdev->dp + i; 473 devp = mdev->dp + i;
@@ -525,6 +525,7 @@ int mic_virtio_add_device(struct mic_vdev *mvdev,
525 char irqname[10]; 525 char irqname[10];
526 struct mic_bootparam *bootparam = mdev->dp; 526 struct mic_bootparam *bootparam = mdev->dp;
527 u16 num; 527 u16 num;
528 dma_addr_t vr_addr;
528 529
529 mutex_lock(&mdev->mic_mutex); 530 mutex_lock(&mdev->mic_mutex);
530 531
@@ -559,17 +560,16 @@ int mic_virtio_add_device(struct mic_vdev *mvdev,
559 } 560 }
560 vr->len = vr_size; 561 vr->len = vr_size;
561 vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN); 562 vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
562 vr->info->magic = MIC_MAGIC + mvdev->virtio_id + i; 563 vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i);
563 vqconfig[i].address = mic_map_single(mdev, 564 vr_addr = mic_map_single(mdev, vr->va, vr_size);
564 vr->va, vr_size); 565 if (mic_map_error(vr_addr)) {
565 if (mic_map_error(vqconfig[i].address)) {
566 free_pages((unsigned long)vr->va, get_order(vr_size)); 566 free_pages((unsigned long)vr->va, get_order(vr_size));
567 ret = -ENOMEM; 567 ret = -ENOMEM;
568 dev_err(mic_dev(mvdev), "%s %d err %d\n", 568 dev_err(mic_dev(mvdev), "%s %d err %d\n",
569 __func__, __LINE__, ret); 569 __func__, __LINE__, ret);
570 goto err; 570 goto err;
571 } 571 }
572 vqconfig[i].address = cpu_to_le64(vqconfig[i].address); 572 vqconfig[i].address = cpu_to_le64(vr_addr);
573 573
574 vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN); 574 vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
575 ret = vringh_init_kern(&mvr->vrh, 575 ret = vringh_init_kern(&mvr->vrh,
@@ -639,7 +639,7 @@ void mic_virtio_del_device(struct mic_vdev *mvdev)
639 struct mic_vdev *tmp_mvdev; 639 struct mic_vdev *tmp_mvdev;
640 struct mic_device *mdev = mvdev->mdev; 640 struct mic_device *mdev = mvdev->mdev;
641 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); 641 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
642 int i, ret, retry = 100; 642 int i, ret, retry;
643 struct mic_vqconfig *vqconfig; 643 struct mic_vqconfig *vqconfig;
644 struct mic_bootparam *bootparam = mdev->dp; 644 struct mic_bootparam *bootparam = mdev->dp;
645 s8 db; 645 s8 db;
@@ -652,16 +652,16 @@ void mic_virtio_del_device(struct mic_vdev *mvdev)
652 "Requesting hot remove id %d\n", mvdev->virtio_id); 652 "Requesting hot remove id %d\n", mvdev->virtio_id);
653 mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE; 653 mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
654 mdev->ops->send_intr(mdev, db); 654 mdev->ops->send_intr(mdev, db);
655 for (i = retry; i--;) { 655 for (retry = 100; retry--;) {
656 ret = wait_event_timeout(wake, 656 ret = wait_event_timeout(wake,
657 mvdev->dc->guest_ack, msecs_to_jiffies(100)); 657 mvdev->dc->guest_ack, msecs_to_jiffies(100));
658 if (ret) 658 if (ret)
659 break; 659 break;
660 } 660 }
661 dev_dbg(mdev->sdev->parent, 661 dev_dbg(mdev->sdev->parent,
662 "Device id %d config_change %d guest_ack %d\n", 662 "Device id %d config_change %d guest_ack %d retry %d\n",
663 mvdev->virtio_id, mvdev->dc->config_change, 663 mvdev->virtio_id, mvdev->dc->config_change,
664 mvdev->dc->guest_ack); 664 mvdev->dc->guest_ack, retry);
665 mvdev->dc->config_change = 0; 665 mvdev->dc->config_change = 0;
666 mvdev->dc->guest_ack = 0; 666 mvdev->dc->guest_ack = 0;
667skip_hot_remove: 667skip_hot_remove:
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
index 81e9541b784c..0dfa8a81436e 100644
--- a/drivers/misc/mic/host/mic_x100.c
+++ b/drivers/misc/mic/host/mic_x100.c
@@ -397,8 +397,8 @@ mic_x100_load_ramdisk(struct mic_device *mdev)
397 * so copy over the ramdisk @ 128M. 397 * so copy over the ramdisk @ 128M.
398 */ 398 */
399 memcpy_toio(mdev->aper.va + (mdev->bootaddr << 1), fw->data, fw->size); 399 memcpy_toio(mdev->aper.va + (mdev->bootaddr << 1), fw->data, fw->size);
400 iowrite32(cpu_to_le32(mdev->bootaddr << 1), &bp->hdr.ramdisk_image); 400 iowrite32(mdev->bootaddr << 1, &bp->hdr.ramdisk_image);
401 iowrite32(cpu_to_le32(fw->size), &bp->hdr.ramdisk_size); 401 iowrite32(fw->size, &bp->hdr.ramdisk_size);
402 release_firmware(fw); 402 release_firmware(fw);
403error: 403error:
404 return rc; 404 return rc;
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index ef8956568c3a..157b570ba343 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -308,8 +308,7 @@ static void sdio_acpi_set_handle(struct sdio_func *func)
308 struct mmc_host *host = func->card->host; 308 struct mmc_host *host = func->card->host;
309 u64 addr = (host->slotno << 16) | func->num; 309 u64 addr = (host->slotno << 16) | func->num;
310 310
311 ACPI_HANDLE_SET(&func->dev, 311 acpi_preset_companion(&func->dev, ACPI_HANDLE(host->parent), addr);
312 acpi_get_child(ACPI_HANDLE(host->parent), addr));
313} 312}
314#else 313#else
315static inline void sdio_acpi_set_handle(struct sdio_func *func) {} 314static inline void sdio_acpi_set_handle(struct sdio_func *func) {}
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 0b10a9030f4e..98b6b6ef7e5c 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -22,6 +22,7 @@
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/timer.h> 24#include <linux/timer.h>
25#include <linux/of.h>
25#include <linux/omap-dma.h> 26#include <linux/omap-dma.h>
26#include <linux/mmc/host.h> 27#include <linux/mmc/host.h>
27#include <linux/mmc/card.h> 28#include <linux/mmc/card.h>
@@ -90,17 +91,6 @@
90#define OMAP_MMC_CMDTYPE_AC 2 91#define OMAP_MMC_CMDTYPE_AC 2
91#define OMAP_MMC_CMDTYPE_ADTC 3 92#define OMAP_MMC_CMDTYPE_ADTC 3
92 93
93#define OMAP_DMA_MMC_TX 21
94#define OMAP_DMA_MMC_RX 22
95#define OMAP_DMA_MMC2_TX 54
96#define OMAP_DMA_MMC2_RX 55
97
98#define OMAP24XX_DMA_MMC2_TX 47
99#define OMAP24XX_DMA_MMC2_RX 48
100#define OMAP24XX_DMA_MMC1_TX 61
101#define OMAP24XX_DMA_MMC1_RX 62
102
103
104#define DRIVER_NAME "mmci-omap" 94#define DRIVER_NAME "mmci-omap"
105 95
106/* Specifies how often in millisecs to poll for card status changes 96/* Specifies how often in millisecs to poll for card status changes
@@ -1330,7 +1320,7 @@ static int mmc_omap_probe(struct platform_device *pdev)
1330 struct mmc_omap_host *host = NULL; 1320 struct mmc_omap_host *host = NULL;
1331 struct resource *res; 1321 struct resource *res;
1332 dma_cap_mask_t mask; 1322 dma_cap_mask_t mask;
1333 unsigned sig; 1323 unsigned sig = 0;
1334 int i, ret = 0; 1324 int i, ret = 0;
1335 int irq; 1325 int irq;
1336 1326
@@ -1340,7 +1330,7 @@ static int mmc_omap_probe(struct platform_device *pdev)
1340 } 1330 }
1341 if (pdata->nr_slots == 0) { 1331 if (pdata->nr_slots == 0) {
1342 dev_err(&pdev->dev, "no slots\n"); 1332 dev_err(&pdev->dev, "no slots\n");
1343 return -ENXIO; 1333 return -EPROBE_DEFER;
1344 } 1334 }
1345 1335
1346 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1336 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1407,19 +1397,20 @@ static int mmc_omap_probe(struct platform_device *pdev)
1407 host->dma_tx_burst = -1; 1397 host->dma_tx_burst = -1;
1408 host->dma_rx_burst = -1; 1398 host->dma_rx_burst = -1;
1409 1399
1410 if (mmc_omap2()) 1400 res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
1411 sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX; 1401 if (res)
1412 else 1402 sig = res->start;
1413 sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX; 1403 host->dma_tx = dma_request_slave_channel_compat(mask,
1414 host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig); 1404 omap_dma_filter_fn, &sig, &pdev->dev, "tx");
1415 if (!host->dma_tx) 1405 if (!host->dma_tx)
1416 dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n", 1406 dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
1417 sig); 1407 sig);
1418 if (mmc_omap2()) 1408
1419 sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX; 1409 res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
1420 else 1410 if (res)
1421 sig = host->id == 0 ? OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX; 1411 sig = res->start;
1422 host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig); 1412 host->dma_rx = dma_request_slave_channel_compat(mask,
1413 omap_dma_filter_fn, &sig, &pdev->dev, "rx");
1423 if (!host->dma_rx) 1414 if (!host->dma_rx)
1424 dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n", 1415 dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
1425 sig); 1416 sig);
@@ -1512,12 +1503,20 @@ static int mmc_omap_remove(struct platform_device *pdev)
1512 return 0; 1503 return 0;
1513} 1504}
1514 1505
1506#if IS_BUILTIN(CONFIG_OF)
1507static const struct of_device_id mmc_omap_match[] = {
1508 { .compatible = "ti,omap2420-mmc", },
1509 { },
1510};
1511#endif
1512
1515static struct platform_driver mmc_omap_driver = { 1513static struct platform_driver mmc_omap_driver = {
1516 .probe = mmc_omap_probe, 1514 .probe = mmc_omap_probe,
1517 .remove = mmc_omap_remove, 1515 .remove = mmc_omap_remove,
1518 .driver = { 1516 .driver = {
1519 .name = DRIVER_NAME, 1517 .name = DRIVER_NAME,
1520 .owner = THIS_MODULE, 1518 .owner = THIS_MODULE,
1519 .of_match_table = of_match_ptr(mmc_omap_match),
1521 }, 1520 },
1522}; 1521};
1523 1522
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index d78a97d4153a..59f08c44abdb 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -375,8 +375,7 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
375 375
376 dma_dev = host->dma_chan->device; 376 dma_dev = host->dma_chan->device;
377 377
378 flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP | 378 flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
379 DMA_COMPL_SKIP_DEST_UNMAP;
380 379
381 phys_addr = dma_map_single(dma_dev->dev, p, len, dir); 380 phys_addr = dma_map_single(dma_dev->dev, p, len, dir);
382 if (dma_mapping_error(dma_dev->dev, phys_addr)) { 381 if (dma_mapping_error(dma_dev->dev, phys_addr)) {
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 3dc1a7564d87..8b2752263db9 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -573,8 +573,6 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
573 dma_dev = chan->device; 573 dma_dev = chan->device;
574 dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction); 574 dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
575 575
576 flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
577
578 if (direction == DMA_TO_DEVICE) { 576 if (direction == DMA_TO_DEVICE) {
579 dma_src = dma_addr; 577 dma_src = dma_addr;
580 dma_dst = host->data_pa; 578 dma_dst = host->data_pa;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 4cabdc9fda90..4b3aaa898a8b 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -962,7 +962,7 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
962static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info) 962static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
963{ 963{
964 struct platform_device *pdev = info->pdev; 964 struct platform_device *pdev = info->pdev;
965 if (use_dma) { 965 if (info->use_dma) {
966 pxa_free_dma(info->data_dma_ch); 966 pxa_free_dma(info->data_dma_ch);
967 dma_free_coherent(&pdev->dev, info->buf_size, 967 dma_free_coherent(&pdev->dev, info->buf_size,
968 info->data_buff, info->data_buff_phys); 968 info->data_buff, info->data_buff_phys);
@@ -1259,10 +1259,6 @@ static struct of_device_id pxa3xx_nand_dt_ids[] = {
1259 .compatible = "marvell,pxa3xx-nand", 1259 .compatible = "marvell,pxa3xx-nand",
1260 .data = (void *)PXA3XX_NAND_VARIANT_PXA, 1260 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
1261 }, 1261 },
1262 {
1263 .compatible = "marvell,armada370-nand",
1264 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
1265 },
1266 {} 1262 {}
1267}; 1263};
1268MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids); 1264MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 4dd5ee2a34cc..398e299ee1bd 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4110,7 +4110,7 @@ static int bond_check_params(struct bond_params *params)
4110 if (!miimon) { 4110 if (!miimon) {
4111 pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n"); 4111 pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
4112 pr_warning("Forcing miimon to 100msec\n"); 4112 pr_warning("Forcing miimon to 100msec\n");
4113 miimon = 100; 4113 miimon = BOND_DEFAULT_MIIMON;
4114 } 4114 }
4115 } 4115 }
4116 4116
@@ -4147,7 +4147,7 @@ static int bond_check_params(struct bond_params *params)
4147 if (!miimon) { 4147 if (!miimon) {
4148 pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n"); 4148 pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n");
4149 pr_warning("Forcing miimon to 100msec\n"); 4149 pr_warning("Forcing miimon to 100msec\n");
4150 miimon = 100; 4150 miimon = BOND_DEFAULT_MIIMON;
4151 } 4151 }
4152 } 4152 }
4153 4153
@@ -4199,9 +4199,9 @@ static int bond_check_params(struct bond_params *params)
4199 (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) { 4199 (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
4200 /* not complete check, but should be good enough to 4200 /* not complete check, but should be good enough to
4201 catch mistakes */ 4201 catch mistakes */
4202 __be32 ip = in_aton(arp_ip_target[i]); 4202 __be32 ip;
4203 if (!isdigit(arp_ip_target[i][0]) || ip == 0 || 4203 if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
4204 ip == htonl(INADDR_BROADCAST)) { 4204 IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) {
4205 pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", 4205 pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
4206 arp_ip_target[i]); 4206 arp_ip_target[i]);
4207 arp_interval = 0; 4207 arp_interval = 0;
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 9a5223c7b4d1..ea6f640782b7 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -45,10 +45,15 @@ int bond_option_mode_set(struct bonding *bond, int mode)
45 return -EPERM; 45 return -EPERM;
46 } 46 }
47 47
48 if (BOND_MODE_IS_LB(mode) && bond->params.arp_interval) { 48 if (BOND_NO_USES_ARP(mode) && bond->params.arp_interval) {
49 pr_err("%s: %s mode is incompatible with arp monitoring.\n", 49 pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
50 bond->dev->name, bond_mode_tbl[mode].modename); 50 bond->dev->name, bond_mode_tbl[mode].modename);
51 return -EINVAL; 51 /* disable arp monitoring */
52 bond->params.arp_interval = 0;
53 /* set miimon to default value */
54 bond->params.miimon = BOND_DEFAULT_MIIMON;
55 pr_info("%s: Setting MII monitoring interval to %d.\n",
56 bond->dev->name, bond->params.miimon);
52 } 57 }
53 58
54 /* don't cache arp_validate between modes */ 59 /* don't cache arp_validate between modes */
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index bc8fd362a5aa..0ae580bbc5db 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -523,9 +523,8 @@ static ssize_t bonding_store_arp_interval(struct device *d,
523 ret = -EINVAL; 523 ret = -EINVAL;
524 goto out; 524 goto out;
525 } 525 }
526 if (bond->params.mode == BOND_MODE_ALB || 526 if (BOND_NO_USES_ARP(bond->params.mode)) {
527 bond->params.mode == BOND_MODE_TLB) { 527 pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n",
528 pr_info("%s: ARP monitoring cannot be used with ALB/TLB. Only MII monitoring is supported on %s.\n",
529 bond->dev->name, bond->dev->name); 528 bond->dev->name, bond->dev->name);
530 ret = -EINVAL; 529 ret = -EINVAL;
531 goto out; 530 goto out;
@@ -603,15 +602,14 @@ static ssize_t bonding_store_arp_targets(struct device *d,
603 return restart_syscall(); 602 return restart_syscall();
604 603
605 targets = bond->params.arp_targets; 604 targets = bond->params.arp_targets;
606 newtarget = in_aton(buf + 1); 605 if (!in4_pton(buf + 1, -1, (u8 *)&newtarget, -1, NULL) ||
606 IS_IP_TARGET_UNUSABLE_ADDRESS(newtarget)) {
607 pr_err("%s: invalid ARP target %pI4 specified for addition\n",
608 bond->dev->name, &newtarget);
609 goto out;
610 }
607 /* look for adds */ 611 /* look for adds */
608 if (buf[0] == '+') { 612 if (buf[0] == '+') {
609 if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) {
610 pr_err("%s: invalid ARP target %pI4 specified for addition\n",
611 bond->dev->name, &newtarget);
612 goto out;
613 }
614
615 if (bond_get_targets_ip(targets, newtarget) != -1) { /* dup */ 613 if (bond_get_targets_ip(targets, newtarget) != -1) { /* dup */
616 pr_err("%s: ARP target %pI4 is already present\n", 614 pr_err("%s: ARP target %pI4 is already present\n",
617 bond->dev->name, &newtarget); 615 bond->dev->name, &newtarget);
@@ -634,12 +632,6 @@ static ssize_t bonding_store_arp_targets(struct device *d,
634 targets[ind] = newtarget; 632 targets[ind] = newtarget;
635 write_unlock_bh(&bond->lock); 633 write_unlock_bh(&bond->lock);
636 } else if (buf[0] == '-') { 634 } else if (buf[0] == '-') {
637 if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) {
638 pr_err("%s: invalid ARP target %pI4 specified for removal\n",
639 bond->dev->name, &newtarget);
640 goto out;
641 }
642
643 ind = bond_get_targets_ip(targets, newtarget); 635 ind = bond_get_targets_ip(targets, newtarget);
644 if (ind == -1) { 636 if (ind == -1) {
645 pr_err("%s: unable to remove nonexistent ARP target %pI4.\n", 637 pr_err("%s: unable to remove nonexistent ARP target %pI4.\n",
@@ -701,6 +693,8 @@ static ssize_t bonding_store_downdelay(struct device *d,
701 int new_value, ret = count; 693 int new_value, ret = count;
702 struct bonding *bond = to_bond(d); 694 struct bonding *bond = to_bond(d);
703 695
696 if (!rtnl_trylock())
697 return restart_syscall();
704 if (!(bond->params.miimon)) { 698 if (!(bond->params.miimon)) {
705 pr_err("%s: Unable to set down delay as MII monitoring is disabled\n", 699 pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
706 bond->dev->name); 700 bond->dev->name);
@@ -734,6 +728,7 @@ static ssize_t bonding_store_downdelay(struct device *d,
734 } 728 }
735 729
736out: 730out:
731 rtnl_unlock();
737 return ret; 732 return ret;
738} 733}
739static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR, 734static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
@@ -756,6 +751,8 @@ static ssize_t bonding_store_updelay(struct device *d,
756 int new_value, ret = count; 751 int new_value, ret = count;
757 struct bonding *bond = to_bond(d); 752 struct bonding *bond = to_bond(d);
758 753
754 if (!rtnl_trylock())
755 return restart_syscall();
759 if (!(bond->params.miimon)) { 756 if (!(bond->params.miimon)) {
760 pr_err("%s: Unable to set up delay as MII monitoring is disabled\n", 757 pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
761 bond->dev->name); 758 bond->dev->name);
@@ -789,6 +786,7 @@ static ssize_t bonding_store_updelay(struct device *d,
789 } 786 }
790 787
791out: 788out:
789 rtnl_unlock();
792 return ret; 790 return ret;
793} 791}
794static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR, 792static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
@@ -1637,12 +1635,12 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
1637 char *buf) 1635 char *buf)
1638{ 1636{
1639 struct bonding *bond = to_bond(d); 1637 struct bonding *bond = to_bond(d);
1640 int packets_per_slave = bond->params.packets_per_slave; 1638 unsigned int packets_per_slave = bond->params.packets_per_slave;
1641 1639
1642 if (packets_per_slave > 1) 1640 if (packets_per_slave > 1)
1643 packets_per_slave = reciprocal_value(packets_per_slave); 1641 packets_per_slave = reciprocal_value(packets_per_slave);
1644 1642
1645 return sprintf(buf, "%d\n", packets_per_slave); 1643 return sprintf(buf, "%u\n", packets_per_slave);
1646} 1644}
1647 1645
1648static ssize_t bonding_store_packets_per_slave(struct device *d, 1646static ssize_t bonding_store_packets_per_slave(struct device *d,
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 77a07a12e77f..a9f4f9f4d8ce 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -35,6 +35,8 @@
35 35
36#define BOND_MAX_ARP_TARGETS 16 36#define BOND_MAX_ARP_TARGETS 16
37 37
38#define BOND_DEFAULT_MIIMON 100
39
38#define IS_UP(dev) \ 40#define IS_UP(dev) \
39 ((((dev)->flags & IFF_UP) == IFF_UP) && \ 41 ((((dev)->flags & IFF_UP) == IFF_UP) && \
40 netif_running(dev) && \ 42 netif_running(dev) && \
@@ -55,6 +57,11 @@
55 ((mode) == BOND_MODE_TLB) || \ 57 ((mode) == BOND_MODE_TLB) || \
56 ((mode) == BOND_MODE_ALB)) 58 ((mode) == BOND_MODE_ALB))
57 59
60#define BOND_NO_USES_ARP(mode) \
61 (((mode) == BOND_MODE_8023AD) || \
62 ((mode) == BOND_MODE_TLB) || \
63 ((mode) == BOND_MODE_ALB))
64
58#define TX_QUEUE_OVERRIDE(mode) \ 65#define TX_QUEUE_OVERRIDE(mode) \
59 (((mode) == BOND_MODE_ACTIVEBACKUP) || \ 66 (((mode) == BOND_MODE_ACTIVEBACKUP) || \
60 ((mode) == BOND_MODE_ROUNDROBIN)) 67 ((mode) == BOND_MODE_ROUNDROBIN))
@@ -63,6 +70,9 @@
63 (((mode) == BOND_MODE_TLB) || \ 70 (((mode) == BOND_MODE_TLB) || \
64 ((mode) == BOND_MODE_ALB)) 71 ((mode) == BOND_MODE_ALB))
65 72
73#define IS_IP_TARGET_UNUSABLE_ADDRESS(a) \
74 ((htonl(INADDR_BROADCAST) == a) || \
75 ipv4_is_zeronet(a))
66/* 76/*
67 * Less bad way to call ioctl from within the kernel; this needs to be 77 * Less bad way to call ioctl from within the kernel; this needs to be
68 * done some other way to get the call out of interrupt context. 78 * done some other way to get the call out of interrupt context.
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index e3fc07cf2f62..77061eebb034 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -712,22 +712,31 @@ static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
712 return 0; 712 return 0;
713} 713}
714 714
715static int c_can_get_berr_counter(const struct net_device *dev, 715static int __c_can_get_berr_counter(const struct net_device *dev,
716 struct can_berr_counter *bec) 716 struct can_berr_counter *bec)
717{ 717{
718 unsigned int reg_err_counter; 718 unsigned int reg_err_counter;
719 struct c_can_priv *priv = netdev_priv(dev); 719 struct c_can_priv *priv = netdev_priv(dev);
720 720
721 c_can_pm_runtime_get_sync(priv);
722
723 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG); 721 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
724 bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >> 722 bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
725 ERR_CNT_REC_SHIFT; 723 ERR_CNT_REC_SHIFT;
726 bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK; 724 bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
727 725
726 return 0;
727}
728
729static int c_can_get_berr_counter(const struct net_device *dev,
730 struct can_berr_counter *bec)
731{
732 struct c_can_priv *priv = netdev_priv(dev);
733 int err;
734
735 c_can_pm_runtime_get_sync(priv);
736 err = __c_can_get_berr_counter(dev, bec);
728 c_can_pm_runtime_put_sync(priv); 737 c_can_pm_runtime_put_sync(priv);
729 738
730 return 0; 739 return err;
731} 740}
732 741
733/* 742/*
@@ -754,6 +763,7 @@ static void c_can_do_tx(struct net_device *dev)
754 if (!(val & (1 << (msg_obj_no - 1)))) { 763 if (!(val & (1 << (msg_obj_no - 1)))) {
755 can_get_echo_skb(dev, 764 can_get_echo_skb(dev,
756 msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); 765 msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
766 c_can_object_get(dev, 0, msg_obj_no, IF_COMM_ALL);
757 stats->tx_bytes += priv->read_reg(priv, 767 stats->tx_bytes += priv->read_reg(priv,
758 C_CAN_IFACE(MSGCTRL_REG, 0)) 768 C_CAN_IFACE(MSGCTRL_REG, 0))
759 & IF_MCONT_DLC_MASK; 769 & IF_MCONT_DLC_MASK;
@@ -872,7 +882,7 @@ static int c_can_handle_state_change(struct net_device *dev,
872 if (unlikely(!skb)) 882 if (unlikely(!skb))
873 return 0; 883 return 0;
874 884
875 c_can_get_berr_counter(dev, &bec); 885 __c_can_get_berr_counter(dev, &bec);
876 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG); 886 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
877 rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >> 887 rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
878 ERR_CNT_RP_SHIFT; 888 ERR_CNT_RP_SHIFT;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index ae08cf129ebb..aaed97bee471 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1020,13 +1020,13 @@ static int flexcan_probe(struct platform_device *pdev)
1020 dev_err(&pdev->dev, "no ipg clock defined\n"); 1020 dev_err(&pdev->dev, "no ipg clock defined\n");
1021 return PTR_ERR(clk_ipg); 1021 return PTR_ERR(clk_ipg);
1022 } 1022 }
1023 clock_freq = clk_get_rate(clk_ipg);
1024 1023
1025 clk_per = devm_clk_get(&pdev->dev, "per"); 1024 clk_per = devm_clk_get(&pdev->dev, "per");
1026 if (IS_ERR(clk_per)) { 1025 if (IS_ERR(clk_per)) {
1027 dev_err(&pdev->dev, "no per clock defined\n"); 1026 dev_err(&pdev->dev, "no per clock defined\n");
1028 return PTR_ERR(clk_per); 1027 return PTR_ERR(clk_per);
1029 } 1028 }
1029 clock_freq = clk_get_rate(clk_per);
1030 } 1030 }
1031 1031
1032 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1032 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 7164a999f50f..f17c3018b7c7 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -494,20 +494,20 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
494 uint8_t isrc, status; 494 uint8_t isrc, status;
495 int n = 0; 495 int n = 0;
496 496
497 /* Shared interrupts and IRQ off? */
498 if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
499 return IRQ_NONE;
500
501 if (priv->pre_irq) 497 if (priv->pre_irq)
502 priv->pre_irq(priv); 498 priv->pre_irq(priv);
503 499
500 /* Shared interrupts and IRQ off? */
501 if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
502 goto out;
503
504 while ((isrc = priv->read_reg(priv, SJA1000_IR)) && 504 while ((isrc = priv->read_reg(priv, SJA1000_IR)) &&
505 (n < SJA1000_MAX_IRQ)) { 505 (n < SJA1000_MAX_IRQ)) {
506 n++; 506
507 status = priv->read_reg(priv, SJA1000_SR); 507 status = priv->read_reg(priv, SJA1000_SR);
508 /* check for absent controller due to hw unplug */ 508 /* check for absent controller due to hw unplug */
509 if (status == 0xFF && sja1000_is_absent(priv)) 509 if (status == 0xFF && sja1000_is_absent(priv))
510 return IRQ_NONE; 510 goto out;
511 511
512 if (isrc & IRQ_WUI) 512 if (isrc & IRQ_WUI)
513 netdev_warn(dev, "wakeup interrupt\n"); 513 netdev_warn(dev, "wakeup interrupt\n");
@@ -535,7 +535,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
535 status = priv->read_reg(priv, SJA1000_SR); 535 status = priv->read_reg(priv, SJA1000_SR);
536 /* check for absent controller */ 536 /* check for absent controller */
537 if (status == 0xFF && sja1000_is_absent(priv)) 537 if (status == 0xFF && sja1000_is_absent(priv))
538 return IRQ_NONE; 538 goto out;
539 } 539 }
540 } 540 }
541 if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) { 541 if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) {
@@ -543,8 +543,9 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
543 if (sja1000_err(dev, isrc, status)) 543 if (sja1000_err(dev, isrc, status))
544 break; 544 break;
545 } 545 }
546 n++;
546 } 547 }
547 548out:
548 if (priv->post_irq) 549 if (priv->post_irq)
549 priv->post_irq(priv); 550 priv->post_irq(priv);
550 551
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 50b853a79d77..46dfb1378c17 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -717,8 +717,7 @@ static int emac_open(struct net_device *dev)
717 if (netif_msg_ifup(db)) 717 if (netif_msg_ifup(db))
718 dev_dbg(db->dev, "enabling %s\n", dev->name); 718 dev_dbg(db->dev, "enabling %s\n", dev->name);
719 719
720 if (devm_request_irq(db->dev, dev->irq, &emac_interrupt, 720 if (request_irq(dev->irq, &emac_interrupt, 0, dev->name, dev))
721 0, dev->name, dev))
722 return -EAGAIN; 721 return -EAGAIN;
723 722
724 /* Initialize EMAC board */ 723 /* Initialize EMAC board */
@@ -774,6 +773,8 @@ static int emac_stop(struct net_device *ndev)
774 773
775 emac_shutdown(ndev); 774 emac_shutdown(ndev);
776 775
776 free_irq(ndev->irq, ndev);
777
777 return 0; 778 return 0;
778} 779}
779 780
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 5aa5e8146496..c3c4c266b846 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1388,6 +1388,9 @@ static int alx_resume(struct device *dev)
1388{ 1388{
1389 struct pci_dev *pdev = to_pci_dev(dev); 1389 struct pci_dev *pdev = to_pci_dev(dev);
1390 struct alx_priv *alx = pci_get_drvdata(pdev); 1390 struct alx_priv *alx = pci_get_drvdata(pdev);
1391 struct alx_hw *hw = &alx->hw;
1392
1393 alx_reset_phy(hw);
1391 1394
1392 if (!netif_running(alx->dev)) 1395 if (!netif_running(alx->dev))
1393 return 0; 1396 return 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 4e01c57d8c8d..a1f66e2c9a86 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1376,7 +1376,6 @@ enum {
1376 BNX2X_SP_RTNL_RX_MODE, 1376 BNX2X_SP_RTNL_RX_MODE,
1377 BNX2X_SP_RTNL_HYPERVISOR_VLAN, 1377 BNX2X_SP_RTNL_HYPERVISOR_VLAN,
1378 BNX2X_SP_RTNL_TX_STOP, 1378 BNX2X_SP_RTNL_TX_STOP,
1379 BNX2X_SP_RTNL_TX_RESUME,
1380}; 1379};
1381 1380
1382struct bnx2x_prev_path_list { 1381struct bnx2x_prev_path_list {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index dcafbda3e5be..ec96130533cc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2959,6 +2959,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2959 2959
2960 bp->port.pmf = 0; 2960 bp->port.pmf = 0;
2961 2961
2962 /* clear pending work in rtnl task */
2963 bp->sp_rtnl_state = 0;
2964 smp_mb();
2965
2962 /* Free SKBs, SGEs, TPA pool and driver internals */ 2966 /* Free SKBs, SGEs, TPA pool and driver internals */
2963 bnx2x_free_skbs(bp); 2967 bnx2x_free_skbs(bp);
2964 if (CNIC_LOADED(bp)) 2968 if (CNIC_LOADED(bp))
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index fcf2761d8828..fdace204b054 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -778,11 +778,6 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
778 778
779 /* ets may affect cmng configuration: reinit it in hw */ 779 /* ets may affect cmng configuration: reinit it in hw */
780 bnx2x_set_local_cmng(bp); 780 bnx2x_set_local_cmng(bp);
781
782 set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state);
783
784 schedule_delayed_work(&bp->sp_rtnl_task, 0);
785
786 return; 781 return;
787 case BNX2X_DCBX_STATE_TX_RELEASED: 782 case BNX2X_DCBX_STATE_TX_RELEASED:
788 DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n"); 783 DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e622cc1f96ff..814d0eca9b33 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -577,7 +577,9 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
577 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); 577 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
578 if (rc) { 578 if (rc) {
579 BNX2X_ERR("DMAE returned failure %d\n", rc); 579 BNX2X_ERR("DMAE returned failure %d\n", rc);
580#ifdef BNX2X_STOP_ON_ERROR
580 bnx2x_panic(); 581 bnx2x_panic();
582#endif
581 } 583 }
582} 584}
583 585
@@ -614,7 +616,9 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
614 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); 616 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
615 if (rc) { 617 if (rc) {
616 BNX2X_ERR("DMAE returned failure %d\n", rc); 618 BNX2X_ERR("DMAE returned failure %d\n", rc);
619#ifdef BNX2X_STOP_ON_ERROR
617 bnx2x_panic(); 620 bnx2x_panic();
621#endif
618 } 622 }
619} 623}
620 624
@@ -5231,18 +5235,18 @@ static void bnx2x_eq_int(struct bnx2x *bp)
5231 5235
5232 case EVENT_RING_OPCODE_STOP_TRAFFIC: 5236 case EVENT_RING_OPCODE_STOP_TRAFFIC:
5233 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); 5237 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
5238 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5234 if (f_obj->complete_cmd(bp, f_obj, 5239 if (f_obj->complete_cmd(bp, f_obj,
5235 BNX2X_F_CMD_TX_STOP)) 5240 BNX2X_F_CMD_TX_STOP))
5236 break; 5241 break;
5237 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5238 goto next_spqe; 5242 goto next_spqe;
5239 5243
5240 case EVENT_RING_OPCODE_START_TRAFFIC: 5244 case EVENT_RING_OPCODE_START_TRAFFIC:
5241 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); 5245 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
5246 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5242 if (f_obj->complete_cmd(bp, f_obj, 5247 if (f_obj->complete_cmd(bp, f_obj,
5243 BNX2X_F_CMD_TX_START)) 5248 BNX2X_F_CMD_TX_START))
5244 break; 5249 break;
5245 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5246 goto next_spqe; 5250 goto next_spqe;
5247 5251
5248 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 5252 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
@@ -9352,6 +9356,10 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
9352 bnx2x_process_kill_chip_reset(bp, global); 9356 bnx2x_process_kill_chip_reset(bp, global);
9353 barrier(); 9357 barrier();
9354 9358
9359 /* clear errors in PGB */
9360 if (!CHIP_IS_E1x(bp))
9361 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
9362
9355 /* Recover after reset: */ 9363 /* Recover after reset: */
9356 /* MCP */ 9364 /* MCP */
9357 if (global && bnx2x_reset_mcp_comp(bp, val)) 9365 if (global && bnx2x_reset_mcp_comp(bp, val))
@@ -9706,11 +9714,10 @@ sp_rtnl_not_reset:
9706 &bp->sp_rtnl_state)) 9714 &bp->sp_rtnl_state))
9707 bnx2x_pf_set_vfs_vlan(bp); 9715 bnx2x_pf_set_vfs_vlan(bp);
9708 9716
9709 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) 9717 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
9710 bnx2x_dcbx_stop_hw_tx(bp); 9718 bnx2x_dcbx_stop_hw_tx(bp);
9711
9712 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state))
9713 bnx2x_dcbx_resume_hw_tx(bp); 9719 bnx2x_dcbx_resume_hw_tx(bp);
9720 }
9714 9721
9715 /* work which needs rtnl lock not-taken (as it takes the lock itself and 9722 /* work which needs rtnl lock not-taken (as it takes the lock itself and
9716 * can be called from other contexts as well) 9723 * can be called from other contexts as well)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 5ecf267dc4cc..3efbb35267c8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -2864,6 +2864,17 @@
2864#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430 2864#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430
2865#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434 2865#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434
2866#define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438 2866#define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438
2867/* [W 7] Writing 1 to each bit in this register clears a corresponding error
2868 * details register and enables logging new error details. Bit 0 - clears
2869 * INCORRECT_RCV_DETAILS; Bit 1 - clears RX_ERR_DETAILS; Bit 2 - clears
2870 * TX_ERR_WR_ADD_31_0 TX_ERR_WR_ADD_63_32 TX_ERR_WR_DETAILS
2871 * TX_ERR_WR_DETAILS2 TX_ERR_RD_ADD_31_0 TX_ERR_RD_ADD_63_32
2872 * TX_ERR_RD_DETAILS TX_ERR_RD_DETAILS2 TX_ERR_WR_DETAILS_ICPL; Bit 3 -
2873 * clears VF_LENGTH_VIOLATION_DETAILS. Bit 4 - clears
2874 * VF_GRC_SPACE_VIOLATION_DETAILS. Bit 5 - clears RX_TCPL_ERR_DETAILS. Bit 6
2875 * - clears TCPL_IN_TWO_RCBS_DETAILS. */
2876#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x943c
2877
2867/* [R 9] Interrupt register #0 read */ 2878/* [R 9] Interrupt register #0 read */
2868#define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298 2879#define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298
2869/* [RC 9] Interrupt register #0 read clear */ 2880/* [RC 9] Interrupt register #0 read clear */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 0216d592d0ce..2e46c28fc601 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -3114,6 +3114,11 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
3114{ 3114{
3115 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); 3115 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
3116 3116
3117 if (!IS_SRIOV(bp)) {
3118 BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
3119 return -EINVAL;
3120 }
3121
3117 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", 3122 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
3118 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 3123 num_vfs_param, BNX2X_NR_VIRTFN(bp));
3119 3124
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 9199adf32d33..efa8a151d789 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -152,7 +152,7 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
152 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { 152 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
153 DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n"); 153 DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
154 *done = PFVF_STATUS_SUCCESS; 154 *done = PFVF_STATUS_SUCCESS;
155 return 0; 155 return -EINVAL;
156 } 156 }
157 157
158 /* Write message address */ 158 /* Write message address */
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 00c5be8c55b8..f3dd93b4aeaa 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8932,6 +8932,9 @@ static int tg3_chip_reset(struct tg3 *tp)
8932 void (*write_op)(struct tg3 *, u32, u32); 8932 void (*write_op)(struct tg3 *, u32, u32);
8933 int i, err; 8933 int i, err;
8934 8934
8935 if (!pci_device_is_present(tp->pdev))
8936 return -ENODEV;
8937
8935 tg3_nvram_lock(tp); 8938 tg3_nvram_lock(tp);
8936 8939
8937 tg3_ape_lock(tp, TG3_APE_LOCK_GRC); 8940 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
@@ -10629,10 +10632,8 @@ static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10629static ssize_t tg3_show_temp(struct device *dev, 10632static ssize_t tg3_show_temp(struct device *dev,
10630 struct device_attribute *devattr, char *buf) 10633 struct device_attribute *devattr, char *buf)
10631{ 10634{
10632 struct pci_dev *pdev = to_pci_dev(dev);
10633 struct net_device *netdev = pci_get_drvdata(pdev);
10634 struct tg3 *tp = netdev_priv(netdev);
10635 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 10635 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10636 struct tg3 *tp = dev_get_drvdata(dev);
10636 u32 temperature; 10637 u32 temperature;
10637 10638
10638 spin_lock_bh(&tp->lock); 10639 spin_lock_bh(&tp->lock);
@@ -10650,29 +10651,25 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10650static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL, 10651static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10651 TG3_TEMP_MAX_OFFSET); 10652 TG3_TEMP_MAX_OFFSET);
10652 10653
10653static struct attribute *tg3_attributes[] = { 10654static struct attribute *tg3_attrs[] = {
10654 &sensor_dev_attr_temp1_input.dev_attr.attr, 10655 &sensor_dev_attr_temp1_input.dev_attr.attr,
10655 &sensor_dev_attr_temp1_crit.dev_attr.attr, 10656 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10656 &sensor_dev_attr_temp1_max.dev_attr.attr, 10657 &sensor_dev_attr_temp1_max.dev_attr.attr,
10657 NULL 10658 NULL
10658}; 10659};
10659 10660ATTRIBUTE_GROUPS(tg3);
10660static const struct attribute_group tg3_group = {
10661 .attrs = tg3_attributes,
10662};
10663 10661
10664static void tg3_hwmon_close(struct tg3 *tp) 10662static void tg3_hwmon_close(struct tg3 *tp)
10665{ 10663{
10666 if (tp->hwmon_dev) { 10664 if (tp->hwmon_dev) {
10667 hwmon_device_unregister(tp->hwmon_dev); 10665 hwmon_device_unregister(tp->hwmon_dev);
10668 tp->hwmon_dev = NULL; 10666 tp->hwmon_dev = NULL;
10669 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10670 } 10667 }
10671} 10668}
10672 10669
10673static void tg3_hwmon_open(struct tg3 *tp) 10670static void tg3_hwmon_open(struct tg3 *tp)
10674{ 10671{
10675 int i, err; 10672 int i;
10676 u32 size = 0; 10673 u32 size = 0;
10677 struct pci_dev *pdev = tp->pdev; 10674 struct pci_dev *pdev = tp->pdev;
10678 struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; 10675 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
@@ -10690,18 +10687,11 @@ static void tg3_hwmon_open(struct tg3 *tp)
10690 if (!size) 10687 if (!size)
10691 return; 10688 return;
10692 10689
10693 /* Register hwmon sysfs hooks */ 10690 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10694 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group); 10691 tp, tg3_groups);
10695 if (err) {
10696 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10697 return;
10698 }
10699
10700 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10701 if (IS_ERR(tp->hwmon_dev)) { 10692 if (IS_ERR(tp->hwmon_dev)) {
10702 tp->hwmon_dev = NULL; 10693 tp->hwmon_dev = NULL;
10703 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); 10694 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10704 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10705 } 10695 }
10706} 10696}
10707 10697
@@ -11594,10 +11584,11 @@ static int tg3_close(struct net_device *dev)
11594 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); 11584 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11595 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); 11585 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11596 11586
11597 tg3_power_down_prepare(tp); 11587 if (pci_device_is_present(tp->pdev)) {
11598 11588 tg3_power_down_prepare(tp);
11599 tg3_carrier_off(tp);
11600 11589
11590 tg3_carrier_off(tp);
11591 }
11601 return 0; 11592 return 0;
11602} 11593}
11603 11594
@@ -13618,16 +13609,9 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev,
13618 if (stmpconf.flags) 13609 if (stmpconf.flags)
13619 return -EINVAL; 13610 return -EINVAL;
13620 13611
13621 switch (stmpconf.tx_type) { 13612 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13622 case HWTSTAMP_TX_ON: 13613 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13623 tg3_flag_set(tp, TX_TSTAMP_EN);
13624 break;
13625 case HWTSTAMP_TX_OFF:
13626 tg3_flag_clear(tp, TX_TSTAMP_EN);
13627 break;
13628 default:
13629 return -ERANGE; 13614 return -ERANGE;
13630 }
13631 13615
13632 switch (stmpconf.rx_filter) { 13616 switch (stmpconf.rx_filter) {
13633 case HWTSTAMP_FILTER_NONE: 13617 case HWTSTAMP_FILTER_NONE:
@@ -13689,6 +13673,11 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev,
13689 tw32(TG3_RX_PTP_CTL, 13673 tw32(TG3_RX_PTP_CTL,
13690 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); 13674 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13691 13675
13676 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13677 tg3_flag_set(tp, TX_TSTAMP_EN);
13678 else
13679 tg3_flag_clear(tp, TX_TSTAMP_EN);
13680
13692 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 13681 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13693 -EFAULT : 0; 13682 -EFAULT : 0;
13694} 13683}
@@ -16514,6 +16503,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16514 /* Clear this out for sanity. */ 16503 /* Clear this out for sanity. */
16515 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 16504 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16516 16505
16506 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16507 tw32(TG3PCI_REG_BASE_ADDR, 0);
16508
16517 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 16509 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16518 &pci_state_reg); 16510 &pci_state_reg);
16519 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && 16511 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
@@ -17741,10 +17733,12 @@ static int tg3_suspend(struct device *device)
17741 struct pci_dev *pdev = to_pci_dev(device); 17733 struct pci_dev *pdev = to_pci_dev(device);
17742 struct net_device *dev = pci_get_drvdata(pdev); 17734 struct net_device *dev = pci_get_drvdata(pdev);
17743 struct tg3 *tp = netdev_priv(dev); 17735 struct tg3 *tp = netdev_priv(dev);
17744 int err; 17736 int err = 0;
17737
17738 rtnl_lock();
17745 17739
17746 if (!netif_running(dev)) 17740 if (!netif_running(dev))
17747 return 0; 17741 goto unlock;
17748 17742
17749 tg3_reset_task_cancel(tp); 17743 tg3_reset_task_cancel(tp);
17750 tg3_phy_stop(tp); 17744 tg3_phy_stop(tp);
@@ -17786,6 +17780,8 @@ out:
17786 tg3_phy_start(tp); 17780 tg3_phy_start(tp);
17787 } 17781 }
17788 17782
17783unlock:
17784 rtnl_unlock();
17789 return err; 17785 return err;
17790} 17786}
17791 17787
@@ -17794,10 +17790,12 @@ static int tg3_resume(struct device *device)
17794 struct pci_dev *pdev = to_pci_dev(device); 17790 struct pci_dev *pdev = to_pci_dev(device);
17795 struct net_device *dev = pci_get_drvdata(pdev); 17791 struct net_device *dev = pci_get_drvdata(pdev);
17796 struct tg3 *tp = netdev_priv(dev); 17792 struct tg3 *tp = netdev_priv(dev);
17797 int err; 17793 int err = 0;
17794
17795 rtnl_lock();
17798 17796
17799 if (!netif_running(dev)) 17797 if (!netif_running(dev))
17800 return 0; 17798 goto unlock;
17801 17799
17802 netif_device_attach(dev); 17800 netif_device_attach(dev);
17803 17801
@@ -17821,6 +17819,8 @@ out:
17821 if (!err) 17819 if (!err)
17822 tg3_phy_start(tp); 17820 tg3_phy_start(tp);
17823 17821
17822unlock:
17823 rtnl_unlock();
17824 return err; 17824 return err;
17825} 17825}
17826#endif /* CONFIG_PM_SLEEP */ 17826#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index ecd2fb3ef695..6c9308850453 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -49,13 +49,15 @@
49#include <asm/io.h> 49#include <asm/io.h>
50#include "cxgb4_uld.h" 50#include "cxgb4_uld.h"
51 51
52#define FW_VERSION_MAJOR 1 52#define T4FW_VERSION_MAJOR 0x01
53#define FW_VERSION_MINOR 4 53#define T4FW_VERSION_MINOR 0x06
54#define FW_VERSION_MICRO 0 54#define T4FW_VERSION_MICRO 0x18
55#define T4FW_VERSION_BUILD 0x00
55 56
56#define FW_VERSION_MAJOR_T5 0 57#define T5FW_VERSION_MAJOR 0x01
57#define FW_VERSION_MINOR_T5 0 58#define T5FW_VERSION_MINOR 0x08
58#define FW_VERSION_MICRO_T5 0 59#define T5FW_VERSION_MICRO 0x1C
60#define T5FW_VERSION_BUILD 0x00
59 61
60#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) 62#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
61 63
@@ -240,6 +242,26 @@ struct pci_params {
240 unsigned char width; 242 unsigned char width;
241}; 243};
242 244
245#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
246#define CHELSIO_CHIP_FPGA 0x100
247#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
248#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
249
250#define CHELSIO_T4 0x4
251#define CHELSIO_T5 0x5
252
253enum chip_type {
254 T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
255 T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
256 T4_FIRST_REV = T4_A1,
257 T4_LAST_REV = T4_A2,
258
259 T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
260 T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
261 T5_FIRST_REV = T5_A0,
262 T5_LAST_REV = T5_A1,
263};
264
243struct adapter_params { 265struct adapter_params {
244 struct tp_params tp; 266 struct tp_params tp;
245 struct vpd_params vpd; 267 struct vpd_params vpd;
@@ -259,7 +281,7 @@ struct adapter_params {
259 281
260 unsigned char nports; /* # of ethernet ports */ 282 unsigned char nports; /* # of ethernet ports */
261 unsigned char portvec; 283 unsigned char portvec;
262 unsigned char rev; /* chip revision */ 284 enum chip_type chip; /* chip code */
263 unsigned char offload; 285 unsigned char offload;
264 286
265 unsigned char bypass; 287 unsigned char bypass;
@@ -267,6 +289,23 @@ struct adapter_params {
267 unsigned int ofldq_wr_cred; 289 unsigned int ofldq_wr_cred;
268}; 290};
269 291
292#include "t4fw_api.h"
293
294#define FW_VERSION(chip) ( \
295 FW_HDR_FW_VER_MAJOR_GET(chip##FW_VERSION_MAJOR) | \
296 FW_HDR_FW_VER_MINOR_GET(chip##FW_VERSION_MINOR) | \
297 FW_HDR_FW_VER_MICRO_GET(chip##FW_VERSION_MICRO) | \
298 FW_HDR_FW_VER_BUILD_GET(chip##FW_VERSION_BUILD))
299#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)
300
301struct fw_info {
302 u8 chip;
303 char *fs_name;
304 char *fw_mod_name;
305 struct fw_hdr fw_hdr;
306};
307
308
270struct trace_params { 309struct trace_params {
271 u32 data[TRACE_LEN / 4]; 310 u32 data[TRACE_LEN / 4];
272 u32 mask[TRACE_LEN / 4]; 311 u32 mask[TRACE_LEN / 4];
@@ -512,25 +551,6 @@ struct sge {
512 551
513struct l2t_data; 552struct l2t_data;
514 553
515#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
516#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
517#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
518
519#define CHELSIO_T4 0x4
520#define CHELSIO_T5 0x5
521
522enum chip_type {
523 T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0),
524 T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
525 T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
526 T4_FIRST_REV = T4_A1,
527 T4_LAST_REV = T4_A3,
528
529 T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
530 T5_FIRST_REV = T5_A1,
531 T5_LAST_REV = T5_A1,
532};
533
534#ifdef CONFIG_PCI_IOV 554#ifdef CONFIG_PCI_IOV
535 555
536/* T4 supports SRIOV on PF0-3 and T5 on PF0-7. However, the Serial 556/* T4 supports SRIOV on PF0-3 and T5 on PF0-7. However, the Serial
@@ -715,12 +735,12 @@ enum {
715 735
716static inline int is_t5(enum chip_type chip) 736static inline int is_t5(enum chip_type chip)
717{ 737{
718 return (chip >= T5_FIRST_REV && chip <= T5_LAST_REV); 738 return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5;
719} 739}
720 740
721static inline int is_t4(enum chip_type chip) 741static inline int is_t4(enum chip_type chip)
722{ 742{
723 return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV); 743 return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
724} 744}
725 745
726static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) 746static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
@@ -900,7 +920,11 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
900int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); 920int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
901unsigned int t4_flash_cfg_addr(struct adapter *adapter); 921unsigned int t4_flash_cfg_addr(struct adapter *adapter);
902int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size); 922int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
903int t4_check_fw_version(struct adapter *adapter); 923int t4_get_fw_version(struct adapter *adapter, u32 *vers);
924int t4_get_tp_version(struct adapter *adapter, u32 *vers);
925int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
926 const u8 *fw_data, unsigned int fw_size,
927 struct fw_hdr *card_fw, enum dev_state state, int *reset);
904int t4_prep_adapter(struct adapter *adapter); 928int t4_prep_adapter(struct adapter *adapter);
905int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); 929int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
906void t4_fatal_err(struct adapter *adapter); 930void t4_fatal_err(struct adapter *adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 8b929eeecd2d..d6b12e035a7d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -276,9 +276,9 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
276 { 0, } 276 { 0, }
277}; 277};
278 278
279#define FW_FNAME "cxgb4/t4fw.bin" 279#define FW4_FNAME "cxgb4/t4fw.bin"
280#define FW5_FNAME "cxgb4/t5fw.bin" 280#define FW5_FNAME "cxgb4/t5fw.bin"
281#define FW_CFNAME "cxgb4/t4-config.txt" 281#define FW4_CFNAME "cxgb4/t4-config.txt"
282#define FW5_CFNAME "cxgb4/t5-config.txt" 282#define FW5_CFNAME "cxgb4/t5-config.txt"
283 283
284MODULE_DESCRIPTION(DRV_DESC); 284MODULE_DESCRIPTION(DRV_DESC);
@@ -286,7 +286,7 @@ MODULE_AUTHOR("Chelsio Communications");
286MODULE_LICENSE("Dual BSD/GPL"); 286MODULE_LICENSE("Dual BSD/GPL");
287MODULE_VERSION(DRV_VERSION); 287MODULE_VERSION(DRV_VERSION);
288MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); 288MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
289MODULE_FIRMWARE(FW_FNAME); 289MODULE_FIRMWARE(FW4_FNAME);
290MODULE_FIRMWARE(FW5_FNAME); 290MODULE_FIRMWARE(FW5_FNAME);
291 291
292/* 292/*
@@ -1071,72 +1071,6 @@ freeout: t4_free_sge_resources(adap);
1071} 1071}
1072 1072
1073/* 1073/*
1074 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
1075 * started but failed, and a negative errno if flash load couldn't start.
1076 */
1077static int upgrade_fw(struct adapter *adap)
1078{
1079 int ret;
1080 u32 vers, exp_major;
1081 const struct fw_hdr *hdr;
1082 const struct firmware *fw;
1083 struct device *dev = adap->pdev_dev;
1084 char *fw_file_name;
1085
1086 switch (CHELSIO_CHIP_VERSION(adap->chip)) {
1087 case CHELSIO_T4:
1088 fw_file_name = FW_FNAME;
1089 exp_major = FW_VERSION_MAJOR;
1090 break;
1091 case CHELSIO_T5:
1092 fw_file_name = FW5_FNAME;
1093 exp_major = FW_VERSION_MAJOR_T5;
1094 break;
1095 default:
1096 dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
1097 return -EINVAL;
1098 }
1099
1100 ret = request_firmware(&fw, fw_file_name, dev);
1101 if (ret < 0) {
1102 dev_err(dev, "unable to load firmware image %s, error %d\n",
1103 fw_file_name, ret);
1104 return ret;
1105 }
1106
1107 hdr = (const struct fw_hdr *)fw->data;
1108 vers = ntohl(hdr->fw_ver);
1109 if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
1110 ret = -EINVAL; /* wrong major version, won't do */
1111 goto out;
1112 }
1113
1114 /*
1115 * If the flash FW is unusable or we found something newer, load it.
1116 */
1117 if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
1118 vers > adap->params.fw_vers) {
1119 dev_info(dev, "upgrading firmware ...\n");
1120 ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
1121 /*force=*/false);
1122 if (!ret)
1123 dev_info(dev,
1124 "firmware upgraded to version %pI4 from %s\n",
1125 &hdr->fw_ver, fw_file_name);
1126 else
1127 dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
1128 } else {
1129 /*
1130 * Tell our caller that we didn't upgrade the firmware.
1131 */
1132 ret = -EINVAL;
1133 }
1134
1135out: release_firmware(fw);
1136 return ret;
1137}
1138
1139/*
1140 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. 1074 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1141 * The allocated memory is cleared. 1075 * The allocated memory is cleared.
1142 */ 1076 */
@@ -1415,7 +1349,7 @@ static int get_sset_count(struct net_device *dev, int sset)
1415static int get_regs_len(struct net_device *dev) 1349static int get_regs_len(struct net_device *dev)
1416{ 1350{
1417 struct adapter *adap = netdev2adap(dev); 1351 struct adapter *adap = netdev2adap(dev);
1418 if (is_t4(adap->chip)) 1352 if (is_t4(adap->params.chip))
1419 return T4_REGMAP_SIZE; 1353 return T4_REGMAP_SIZE;
1420 else 1354 else
1421 return T5_REGMAP_SIZE; 1355 return T5_REGMAP_SIZE;
@@ -1499,7 +1433,7 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1499 data += sizeof(struct port_stats) / sizeof(u64); 1433 data += sizeof(struct port_stats) / sizeof(u64);
1500 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); 1434 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1501 data += sizeof(struct queue_port_stats) / sizeof(u64); 1435 data += sizeof(struct queue_port_stats) / sizeof(u64);
1502 if (!is_t4(adapter->chip)) { 1436 if (!is_t4(adapter->params.chip)) {
1503 t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7)); 1437 t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
1504 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL); 1438 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
1505 val2 = t4_read_reg(adapter, SGE_STAT_MATCH); 1439 val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
@@ -1521,8 +1455,8 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1521 */ 1455 */
1522static inline unsigned int mk_adap_vers(const struct adapter *ap) 1456static inline unsigned int mk_adap_vers(const struct adapter *ap)
1523{ 1457{
1524 return CHELSIO_CHIP_VERSION(ap->chip) | 1458 return CHELSIO_CHIP_VERSION(ap->params.chip) |
1525 (CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16); 1459 (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
1526} 1460}
1527 1461
1528static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, 1462static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
@@ -2189,7 +2123,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
2189 static const unsigned int *reg_ranges; 2123 static const unsigned int *reg_ranges;
2190 int arr_size = 0, buf_size = 0; 2124 int arr_size = 0, buf_size = 0;
2191 2125
2192 if (is_t4(ap->chip)) { 2126 if (is_t4(ap->params.chip)) {
2193 reg_ranges = &t4_reg_ranges[0]; 2127 reg_ranges = &t4_reg_ranges[0];
2194 arr_size = ARRAY_SIZE(t4_reg_ranges); 2128 arr_size = ARRAY_SIZE(t4_reg_ranges);
2195 buf_size = T4_REGMAP_SIZE; 2129 buf_size = T4_REGMAP_SIZE;
@@ -2967,7 +2901,7 @@ static int setup_debugfs(struct adapter *adap)
2967 size = t4_read_reg(adap, MA_EDRAM1_BAR); 2901 size = t4_read_reg(adap, MA_EDRAM1_BAR);
2968 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size)); 2902 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
2969 } 2903 }
2970 if (is_t4(adap->chip)) { 2904 if (is_t4(adap->params.chip)) {
2971 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); 2905 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2972 if (i & EXT_MEM_ENABLE) 2906 if (i & EXT_MEM_ENABLE)
2973 add_debugfs_mem(adap, "mc", MEM_MC, 2907 add_debugfs_mem(adap, "mc", MEM_MC,
@@ -3419,7 +3353,7 @@ unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3419 3353
3420 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); 3354 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3421 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); 3355 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3422 if (is_t4(adap->chip)) { 3356 if (is_t4(adap->params.chip)) {
3423 lp_count = G_LP_COUNT(v1); 3357 lp_count = G_LP_COUNT(v1);
3424 hp_count = G_HP_COUNT(v1); 3358 hp_count = G_HP_COUNT(v1);
3425 } else { 3359 } else {
@@ -3588,7 +3522,7 @@ static void drain_db_fifo(struct adapter *adap, int usecs)
3588 do { 3522 do {
3589 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); 3523 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3590 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); 3524 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3591 if (is_t4(adap->chip)) { 3525 if (is_t4(adap->params.chip)) {
3592 lp_count = G_LP_COUNT(v1); 3526 lp_count = G_LP_COUNT(v1);
3593 hp_count = G_HP_COUNT(v1); 3527 hp_count = G_HP_COUNT(v1);
3594 } else { 3528 } else {
@@ -3708,7 +3642,7 @@ static void process_db_drop(struct work_struct *work)
3708 3642
3709 adap = container_of(work, struct adapter, db_drop_task); 3643 adap = container_of(work, struct adapter, db_drop_task);
3710 3644
3711 if (is_t4(adap->chip)) { 3645 if (is_t4(adap->params.chip)) {
3712 disable_dbs(adap); 3646 disable_dbs(adap);
3713 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); 3647 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3714 drain_db_fifo(adap, 1); 3648 drain_db_fifo(adap, 1);
@@ -3753,7 +3687,7 @@ static void process_db_drop(struct work_struct *work)
3753 3687
3754void t4_db_full(struct adapter *adap) 3688void t4_db_full(struct adapter *adap)
3755{ 3689{
3756 if (is_t4(adap->chip)) { 3690 if (is_t4(adap->params.chip)) {
3757 t4_set_reg_field(adap, SGE_INT_ENABLE3, 3691 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3758 DBFIFO_HP_INT | DBFIFO_LP_INT, 0); 3692 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3759 queue_work(workq, &adap->db_full_task); 3693 queue_work(workq, &adap->db_full_task);
@@ -3762,7 +3696,7 @@ void t4_db_full(struct adapter *adap)
3762 3696
3763void t4_db_dropped(struct adapter *adap) 3697void t4_db_dropped(struct adapter *adap)
3764{ 3698{
3765 if (is_t4(adap->chip)) 3699 if (is_t4(adap->params.chip))
3766 queue_work(workq, &adap->db_drop_task); 3700 queue_work(workq, &adap->db_drop_task);
3767} 3701}
3768 3702
@@ -3789,7 +3723,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
3789 lli.nchan = adap->params.nports; 3723 lli.nchan = adap->params.nports;
3790 lli.nports = adap->params.nports; 3724 lli.nports = adap->params.nports;
3791 lli.wr_cred = adap->params.ofldq_wr_cred; 3725 lli.wr_cred = adap->params.ofldq_wr_cred;
3792 lli.adapter_type = adap->params.rev; 3726 lli.adapter_type = adap->params.chip;
3793 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); 3727 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
3794 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( 3728 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
3795 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> 3729 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
@@ -4483,7 +4417,7 @@ static void setup_memwin(struct adapter *adap)
4483 u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base; 4417 u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
4484 4418
4485 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ 4419 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
4486 if (is_t4(adap->chip)) { 4420 if (is_t4(adap->params.chip)) {
4487 mem_win0_base = bar0 + MEMWIN0_BASE; 4421 mem_win0_base = bar0 + MEMWIN0_BASE;
4488 mem_win1_base = bar0 + MEMWIN1_BASE; 4422 mem_win1_base = bar0 + MEMWIN1_BASE;
4489 mem_win2_base = bar0 + MEMWIN2_BASE; 4423 mem_win2_base = bar0 + MEMWIN2_BASE;
@@ -4668,8 +4602,10 @@ static int adap_init0_config(struct adapter *adapter, int reset)
4668 const struct firmware *cf; 4602 const struct firmware *cf;
4669 unsigned long mtype = 0, maddr = 0; 4603 unsigned long mtype = 0, maddr = 0;
4670 u32 finiver, finicsum, cfcsum; 4604 u32 finiver, finicsum, cfcsum;
4671 int ret, using_flash; 4605 int ret;
4606 int config_issued = 0;
4672 char *fw_config_file, fw_config_file_path[256]; 4607 char *fw_config_file, fw_config_file_path[256];
4608 char *config_name = NULL;
4673 4609
4674 /* 4610 /*
4675 * Reset device if necessary. 4611 * Reset device if necessary.
@@ -4686,9 +4622,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
4686 * then use that. Otherwise, use the configuration file stored 4622 * then use that. Otherwise, use the configuration file stored
4687 * in the adapter flash ... 4623 * in the adapter flash ...
4688 */ 4624 */
4689 switch (CHELSIO_CHIP_VERSION(adapter->chip)) { 4625 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
4690 case CHELSIO_T4: 4626 case CHELSIO_T4:
4691 fw_config_file = FW_CFNAME; 4627 fw_config_file = FW4_CFNAME;
4692 break; 4628 break;
4693 case CHELSIO_T5: 4629 case CHELSIO_T5:
4694 fw_config_file = FW5_CFNAME; 4630 fw_config_file = FW5_CFNAME;
@@ -4702,13 +4638,16 @@ static int adap_init0_config(struct adapter *adapter, int reset)
4702 4638
4703 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev); 4639 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4704 if (ret < 0) { 4640 if (ret < 0) {
4705 using_flash = 1; 4641 config_name = "On FLASH";
4706 mtype = FW_MEMTYPE_CF_FLASH; 4642 mtype = FW_MEMTYPE_CF_FLASH;
4707 maddr = t4_flash_cfg_addr(adapter); 4643 maddr = t4_flash_cfg_addr(adapter);
4708 } else { 4644 } else {
4709 u32 params[7], val[7]; 4645 u32 params[7], val[7];
4710 4646
4711 using_flash = 0; 4647 sprintf(fw_config_file_path,
4648 "/lib/firmware/%s", fw_config_file);
4649 config_name = fw_config_file_path;
4650
4712 if (cf->size >= FLASH_CFG_MAX_SIZE) 4651 if (cf->size >= FLASH_CFG_MAX_SIZE)
4713 ret = -ENOMEM; 4652 ret = -ENOMEM;
4714 else { 4653 else {
@@ -4776,6 +4715,26 @@ static int adap_init0_config(struct adapter *adapter, int reset)
4776 FW_LEN16(caps_cmd)); 4715 FW_LEN16(caps_cmd));
4777 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), 4716 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4778 &caps_cmd); 4717 &caps_cmd);
4718
4719 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
4720 * Configuration File in FLASH), our last gasp effort is to use the
4721 * Firmware Configuration File which is embedded in the firmware. A
4722 * very few early versions of the firmware didn't have one embedded
4723 * but we can ignore those.
4724 */
4725 if (ret == -ENOENT) {
4726 memset(&caps_cmd, 0, sizeof(caps_cmd));
4727 caps_cmd.op_to_write =
4728 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4729 FW_CMD_REQUEST |
4730 FW_CMD_READ);
4731 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4732 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4733 sizeof(caps_cmd), &caps_cmd);
4734 config_name = "Firmware Default";
4735 }
4736
4737 config_issued = 1;
4779 if (ret < 0) 4738 if (ret < 0)
4780 goto bye; 4739 goto bye;
4781 4740
@@ -4816,7 +4775,6 @@ static int adap_init0_config(struct adapter *adapter, int reset)
4816 if (ret < 0) 4775 if (ret < 0)
4817 goto bye; 4776 goto bye;
4818 4777
4819 sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
4820 /* 4778 /*
4821 * Return successfully and note that we're operating with parameters 4779 * Return successfully and note that we're operating with parameters
4822 * not supplied by the driver, rather than from hard-wired 4780 * not supplied by the driver, rather than from hard-wired
@@ -4824,11 +4782,8 @@ static int adap_init0_config(struct adapter *adapter, int reset)
4824 */ 4782 */
4825 adapter->flags |= USING_SOFT_PARAMS; 4783 adapter->flags |= USING_SOFT_PARAMS;
4826 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ 4784 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4827 "Configuration File %s, version %#x, computed checksum %#x\n", 4785 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4828 (using_flash 4786 config_name, finiver, cfcsum);
4829 ? "in device FLASH"
4830 : fw_config_file_path),
4831 finiver, cfcsum);
4832 return 0; 4787 return 0;
4833 4788
4834 /* 4789 /*
@@ -4837,9 +4792,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
4837 * want to issue a warning since this is fairly common.) 4792 * want to issue a warning since this is fairly common.)
4838 */ 4793 */
4839bye: 4794bye:
4840 if (ret != -ENOENT) 4795 if (config_issued && ret != -ENOENT)
4841 dev_warn(adapter->pdev_dev, "Configuration file error %d\n", 4796 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
4842 -ret); 4797 config_name, -ret);
4843 return ret; 4798 return ret;
4844} 4799}
4845 4800
@@ -5086,6 +5041,47 @@ bye:
5086 return ret; 5041 return ret;
5087} 5042}
5088 5043
5044static struct fw_info fw_info_array[] = {
5045 {
5046 .chip = CHELSIO_T4,
5047 .fs_name = FW4_CFNAME,
5048 .fw_mod_name = FW4_FNAME,
5049 .fw_hdr = {
5050 .chip = FW_HDR_CHIP_T4,
5051 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5052 .intfver_nic = FW_INTFVER(T4, NIC),
5053 .intfver_vnic = FW_INTFVER(T4, VNIC),
5054 .intfver_ri = FW_INTFVER(T4, RI),
5055 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
5056 .intfver_fcoe = FW_INTFVER(T4, FCOE),
5057 },
5058 }, {
5059 .chip = CHELSIO_T5,
5060 .fs_name = FW5_CFNAME,
5061 .fw_mod_name = FW5_FNAME,
5062 .fw_hdr = {
5063 .chip = FW_HDR_CHIP_T5,
5064 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5065 .intfver_nic = FW_INTFVER(T5, NIC),
5066 .intfver_vnic = FW_INTFVER(T5, VNIC),
5067 .intfver_ri = FW_INTFVER(T5, RI),
5068 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
5069 .intfver_fcoe = FW_INTFVER(T5, FCOE),
5070 },
5071 }
5072};
5073
5074static struct fw_info *find_fw_info(int chip)
5075{
5076 int i;
5077
5078 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5079 if (fw_info_array[i].chip == chip)
5080 return &fw_info_array[i];
5081 }
5082 return NULL;
5083}
5084
5089/* 5085/*
5090 * Phase 0 of initialization: contact FW, obtain config, perform basic init. 5086 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5091 */ 5087 */
@@ -5123,44 +5119,54 @@ static int adap_init0(struct adapter *adap)
5123 * later reporting and B. to warn if the currently loaded firmware 5119 * later reporting and B. to warn if the currently loaded firmware
5124 * is excessively mismatched relative to the driver.) 5120 * is excessively mismatched relative to the driver.)
5125 */ 5121 */
5126 ret = t4_check_fw_version(adap); 5122 t4_get_fw_version(adap, &adap->params.fw_vers);
5127 5123 t4_get_tp_version(adap, &adap->params.tp_vers);
5128 /* The error code -EFAULT is returned by t4_check_fw_version() if
5129 * firmware on adapter < supported firmware. If firmware on adapter
5130 * is too old (not supported by driver) and we're the MASTER_PF set
5131 * adapter state to DEV_STATE_UNINIT to force firmware upgrade
5132 * and reinitialization.
5133 */
5134 if ((adap->flags & MASTER_PF) && ret == -EFAULT)
5135 state = DEV_STATE_UNINIT;
5136 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { 5124 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
5137 if (ret == -EINVAL || ret == -EFAULT || ret > 0) { 5125 struct fw_info *fw_info;
5138 if (upgrade_fw(adap) >= 0) { 5126 struct fw_hdr *card_fw;
5139 /* 5127 const struct firmware *fw;
5140 * Note that the chip was reset as part of the 5128 const u8 *fw_data = NULL;
5141 * firmware upgrade so we don't reset it again 5129 unsigned int fw_size = 0;
5142 * below and grab the new firmware version. 5130
5143 */ 5131 /* This is the firmware whose headers the driver was compiled
5144 reset = 0; 5132 * against
5145 ret = t4_check_fw_version(adap); 5133 */
5146 } else 5134 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5147 if (ret == -EFAULT) { 5135 if (fw_info == NULL) {
5148 /* 5136 dev_err(adap->pdev_dev,
5149 * Firmware is old but still might 5137 "unable to get firmware info for chip %d.\n",
5150 * work if we force reinitialization 5138 CHELSIO_CHIP_VERSION(adap->params.chip));
5151 * of the adapter. Ignoring FW upgrade 5139 return -EINVAL;
5152 * failure.
5153 */
5154 dev_warn(adap->pdev_dev,
5155 "Ignoring firmware upgrade "
5156 "failure, and forcing driver "
5157 "to reinitialize the "
5158 "adapter.\n");
5159 ret = 0;
5160 }
5161 } 5140 }
5141
5142 /* allocate memory to read the header of the firmware on the
5143 * card
5144 */
5145 card_fw = t4_alloc_mem(sizeof(*card_fw));
5146
5147 /* Get FW from from /lib/firmware/ */
5148 ret = request_firmware(&fw, fw_info->fw_mod_name,
5149 adap->pdev_dev);
5150 if (ret < 0) {
5151 dev_err(adap->pdev_dev,
5152 "unable to load firmware image %s, error %d\n",
5153 fw_info->fw_mod_name, ret);
5154 } else {
5155 fw_data = fw->data;
5156 fw_size = fw->size;
5157 }
5158
5159 /* upgrade FW logic */
5160 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5161 state, &reset);
5162
5163 /* Cleaning up */
5164 if (fw != NULL)
5165 release_firmware(fw);
5166 t4_free_mem(card_fw);
5167
5162 if (ret < 0) 5168 if (ret < 0)
5163 return ret; 5169 goto bye;
5164 } 5170 }
5165 5171
5166 /* 5172 /*
@@ -5245,7 +5251,7 @@ static int adap_init0(struct adapter *adap)
5245 if (ret == -ENOENT) { 5251 if (ret == -ENOENT) {
5246 dev_info(adap->pdev_dev, 5252 dev_info(adap->pdev_dev,
5247 "No Configuration File present " 5253 "No Configuration File present "
5248 "on adapter. Using hard-wired " 5254 "on adapter. Using hard-wired "
5249 "configuration parameters.\n"); 5255 "configuration parameters.\n");
5250 ret = adap_init0_no_config(adap, reset); 5256 ret = adap_init0_no_config(adap, reset);
5251 } 5257 }
@@ -5787,7 +5793,7 @@ static void print_port_info(const struct net_device *dev)
5787 5793
5788 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", 5794 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
5789 adap->params.vpd.id, 5795 adap->params.vpd.id,
5790 CHELSIO_CHIP_RELEASE(adap->params.rev), buf, 5796 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
5791 is_offload(adap) ? "R" : "", adap->params.pci.width, spd, 5797 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
5792 (adap->flags & USING_MSIX) ? " MSI-X" : 5798 (adap->flags & USING_MSIX) ? " MSI-X" :
5793 (adap->flags & USING_MSI) ? " MSI" : ""); 5799 (adap->flags & USING_MSI) ? " MSI" : "");
@@ -5910,7 +5916,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5910 if (err) 5916 if (err)
5911 goto out_unmap_bar0; 5917 goto out_unmap_bar0;
5912 5918
5913 if (!is_t4(adapter->chip)) { 5919 if (!is_t4(adapter->params.chip)) {
5914 s_qpp = QUEUESPERPAGEPF1 * adapter->fn; 5920 s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
5915 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter, 5921 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
5916 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); 5922 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
@@ -6064,7 +6070,7 @@ sriov:
6064 out_free_dev: 6070 out_free_dev:
6065 free_some_resources(adapter); 6071 free_some_resources(adapter);
6066 out_unmap_bar: 6072 out_unmap_bar:
6067 if (!is_t4(adapter->chip)) 6073 if (!is_t4(adapter->params.chip))
6068 iounmap(adapter->bar2); 6074 iounmap(adapter->bar2);
6069 out_unmap_bar0: 6075 out_unmap_bar0:
6070 iounmap(adapter->regs); 6076 iounmap(adapter->regs);
@@ -6116,7 +6122,7 @@ static void remove_one(struct pci_dev *pdev)
6116 6122
6117 free_some_resources(adapter); 6123 free_some_resources(adapter);
6118 iounmap(adapter->regs); 6124 iounmap(adapter->regs);
6119 if (!is_t4(adapter->chip)) 6125 if (!is_t4(adapter->params.chip))
6120 iounmap(adapter->bar2); 6126 iounmap(adapter->bar2);
6121 kfree(adapter); 6127 kfree(adapter);
6122 pci_disable_pcie_error_reporting(pdev); 6128 pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ac311f5f3eb9..cc380c36e1a8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -509,7 +509,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
509 u32 val; 509 u32 val;
510 if (q->pend_cred >= 8) { 510 if (q->pend_cred >= 8) {
511 val = PIDX(q->pend_cred / 8); 511 val = PIDX(q->pend_cred / 8);
512 if (!is_t4(adap->chip)) 512 if (!is_t4(adap->params.chip))
513 val |= DBTYPE(1); 513 val |= DBTYPE(1);
514 wmb(); 514 wmb();
515 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) | 515 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) |
@@ -847,7 +847,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
847 wmb(); /* write descriptors before telling HW */ 847 wmb(); /* write descriptors before telling HW */
848 spin_lock(&q->db_lock); 848 spin_lock(&q->db_lock);
849 if (!q->db_disabled) { 849 if (!q->db_disabled) {
850 if (is_t4(adap->chip)) { 850 if (is_t4(adap->params.chip)) {
851 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), 851 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
852 QID(q->cntxt_id) | PIDX(n)); 852 QID(q->cntxt_id) | PIDX(n));
853 } else { 853 } else {
@@ -1596,7 +1596,7 @@ static noinline int handle_trace_pkt(struct adapter *adap,
1596 return 0; 1596 return 0;
1597 } 1597 }
1598 1598
1599 if (is_t4(adap->chip)) 1599 if (is_t4(adap->params.chip))
1600 __skb_pull(skb, sizeof(struct cpl_trace_pkt)); 1600 __skb_pull(skb, sizeof(struct cpl_trace_pkt));
1601 else 1601 else
1602 __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt)); 1602 __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
@@ -1661,7 +1661,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1661 const struct cpl_rx_pkt *pkt; 1661 const struct cpl_rx_pkt *pkt;
1662 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); 1662 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1663 struct sge *s = &q->adap->sge; 1663 struct sge *s = &q->adap->sge;
1664 int cpl_trace_pkt = is_t4(q->adap->chip) ? 1664 int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
1665 CPL_TRACE_PKT : CPL_TRACE_PKT_T5; 1665 CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
1666 1666
1667 if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) 1667 if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
@@ -2182,7 +2182,7 @@ err:
2182static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) 2182static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2183{ 2183{
2184 q->cntxt_id = id; 2184 q->cntxt_id = id;
2185 if (!is_t4(adap->chip)) { 2185 if (!is_t4(adap->params.chip)) {
2186 unsigned int s_qpp; 2186 unsigned int s_qpp;
2187 unsigned short udb_density; 2187 unsigned short udb_density;
2188 unsigned long qpshift; 2188 unsigned long qpshift;
@@ -2641,7 +2641,7 @@ static int t4_sge_init_hard(struct adapter *adap)
2641 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows 2641 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
2642 * and generate an interrupt when this occurs so we can recover. 2642 * and generate an interrupt when this occurs so we can recover.
2643 */ 2643 */
2644 if (is_t4(adap->chip)) { 2644 if (is_t4(adap->params.chip)) {
2645 t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS, 2645 t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
2646 V_HP_INT_THRESH(M_HP_INT_THRESH) | 2646 V_HP_INT_THRESH(M_HP_INT_THRESH) |
2647 V_LP_INT_THRESH(M_LP_INT_THRESH), 2647 V_LP_INT_THRESH(M_LP_INT_THRESH),
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4cbb2f9850be..74a6fce5a15a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -296,7 +296,7 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
296 u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len; 296 u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
297 u32 mc_bist_status_rdata, mc_bist_data_pattern; 297 u32 mc_bist_status_rdata, mc_bist_data_pattern;
298 298
299 if (is_t4(adap->chip)) { 299 if (is_t4(adap->params.chip)) {
300 mc_bist_cmd = MC_BIST_CMD; 300 mc_bist_cmd = MC_BIST_CMD;
301 mc_bist_cmd_addr = MC_BIST_CMD_ADDR; 301 mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
302 mc_bist_cmd_len = MC_BIST_CMD_LEN; 302 mc_bist_cmd_len = MC_BIST_CMD_LEN;
@@ -349,7 +349,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
349 u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len; 349 u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
350 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata; 350 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
351 351
352 if (is_t4(adap->chip)) { 352 if (is_t4(adap->params.chip)) {
353 edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx); 353 edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
354 edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx); 354 edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
355 edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx); 355 edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
@@ -402,7 +402,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
402static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir) 402static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
403{ 403{
404 int i; 404 int i;
405 u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn); 405 u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
406 406
407 /* 407 /*
408 * Setup offset into PCIE memory window. Address must be a 408 * Setup offset into PCIE memory window. Address must be a
@@ -863,104 +863,169 @@ unlock:
863} 863}
864 864
865/** 865/**
866 * get_fw_version - read the firmware version 866 * t4_get_fw_version - read the firmware version
867 * @adapter: the adapter 867 * @adapter: the adapter
868 * @vers: where to place the version 868 * @vers: where to place the version
869 * 869 *
870 * Reads the FW version from flash. 870 * Reads the FW version from flash.
871 */ 871 */
872static int get_fw_version(struct adapter *adapter, u32 *vers) 872int t4_get_fw_version(struct adapter *adapter, u32 *vers)
873{ 873{
874 return t4_read_flash(adapter, adapter->params.sf_fw_start + 874 return t4_read_flash(adapter, FLASH_FW_START +
875 offsetof(struct fw_hdr, fw_ver), 1, vers, 0); 875 offsetof(struct fw_hdr, fw_ver), 1,
876 vers, 0);
876} 877}
877 878
878/** 879/**
879 * get_tp_version - read the TP microcode version 880 * t4_get_tp_version - read the TP microcode version
880 * @adapter: the adapter 881 * @adapter: the adapter
881 * @vers: where to place the version 882 * @vers: where to place the version
882 * 883 *
883 * Reads the TP microcode version from flash. 884 * Reads the TP microcode version from flash.
884 */ 885 */
885static int get_tp_version(struct adapter *adapter, u32 *vers) 886int t4_get_tp_version(struct adapter *adapter, u32 *vers)
886{ 887{
887 return t4_read_flash(adapter, adapter->params.sf_fw_start + 888 return t4_read_flash(adapter, FLASH_FW_START +
888 offsetof(struct fw_hdr, tp_microcode_ver), 889 offsetof(struct fw_hdr, tp_microcode_ver),
889 1, vers, 0); 890 1, vers, 0);
890} 891}
891 892
892/** 893/* Is the given firmware API compatible with the one the driver was compiled
893 * t4_check_fw_version - check if the FW is compatible with this driver 894 * with?
894 * @adapter: the adapter
895 *
896 * Checks if an adapter's FW is compatible with the driver. Returns 0
897 * if there's exact match, a negative error if the version could not be
898 * read or there's a major version mismatch, and a positive value if the
899 * expected major version is found but there's a minor version mismatch.
900 */ 895 */
901int t4_check_fw_version(struct adapter *adapter) 896static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
902{ 897{
903 u32 api_vers[2];
904 int ret, major, minor, micro;
905 int exp_major, exp_minor, exp_micro;
906 898
907 ret = get_fw_version(adapter, &adapter->params.fw_vers); 899 /* short circuit if it's the exact same firmware version */
908 if (!ret) 900 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
909 ret = get_tp_version(adapter, &adapter->params.tp_vers); 901 return 1;
910 if (!ret)
911 ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
912 offsetof(struct fw_hdr, intfver_nic),
913 2, api_vers, 1);
914 if (ret)
915 return ret;
916 902
917 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers); 903#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
918 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers); 904 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
919 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers); 905 SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
906 return 1;
907#undef SAME_INTF
920 908
921 switch (CHELSIO_CHIP_VERSION(adapter->chip)) { 909 return 0;
922 case CHELSIO_T4: 910}
923 exp_major = FW_VERSION_MAJOR;
924 exp_minor = FW_VERSION_MINOR;
925 exp_micro = FW_VERSION_MICRO;
926 break;
927 case CHELSIO_T5:
928 exp_major = FW_VERSION_MAJOR_T5;
929 exp_minor = FW_VERSION_MINOR_T5;
930 exp_micro = FW_VERSION_MICRO_T5;
931 break;
932 default:
933 dev_err(adapter->pdev_dev, "Unsupported chip type, %x\n",
934 adapter->chip);
935 return -EINVAL;
936 }
937 911
938 memcpy(adapter->params.api_vers, api_vers, 912/* The firmware in the filesystem is usable, but should it be installed?
939 sizeof(adapter->params.api_vers)); 913 * This routine explains itself in detail if it indicates the filesystem
914 * firmware should be installed.
915 */
916static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
917 int k, int c)
918{
919 const char *reason;
940 920
941 if (major < exp_major || (major == exp_major && minor < exp_minor) || 921 if (!card_fw_usable) {
942 (major == exp_major && minor == exp_minor && micro < exp_micro)) { 922 reason = "incompatible or unusable";
943 dev_err(adapter->pdev_dev, 923 goto install;
944 "Card has firmware version %u.%u.%u, minimum "
945 "supported firmware is %u.%u.%u.\n", major, minor,
946 micro, exp_major, exp_minor, exp_micro);
947 return -EFAULT;
948 } 924 }
949 925
950 if (major != exp_major) { /* major mismatch - fail */ 926 if (k > c) {
951 dev_err(adapter->pdev_dev, 927 reason = "older than the version supported with this driver";
952 "card FW has major version %u, driver wants %u\n", 928 goto install;
953 major, exp_major);
954 return -EINVAL;
955 } 929 }
956 930
957 if (minor == exp_minor && micro == exp_micro) 931 return 0;
958 return 0; /* perfect match */ 932
933install:
934 dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
935 "installing firmware %u.%u.%u.%u on card.\n",
936 FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
937 FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason,
938 FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
939 FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
959 940
960 /* Minor/micro version mismatch. Report it but often it's OK. */
961 return 1; 941 return 1;
962} 942}
963 943
944int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
945 const u8 *fw_data, unsigned int fw_size,
946 struct fw_hdr *card_fw, enum dev_state state,
947 int *reset)
948{
949 int ret, card_fw_usable, fs_fw_usable;
950 const struct fw_hdr *fs_fw;
951 const struct fw_hdr *drv_fw;
952
953 drv_fw = &fw_info->fw_hdr;
954
955 /* Read the header of the firmware on the card */
956 ret = -t4_read_flash(adap, FLASH_FW_START,
957 sizeof(*card_fw) / sizeof(uint32_t),
958 (uint32_t *)card_fw, 1);
959 if (ret == 0) {
960 card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
961 } else {
962 dev_err(adap->pdev_dev,
963 "Unable to read card's firmware header: %d\n", ret);
964 card_fw_usable = 0;
965 }
966
967 if (fw_data != NULL) {
968 fs_fw = (const void *)fw_data;
969 fs_fw_usable = fw_compatible(drv_fw, fs_fw);
970 } else {
971 fs_fw = NULL;
972 fs_fw_usable = 0;
973 }
974
975 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
976 (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
977 /* Common case: the firmware on the card is an exact match and
978 * the filesystem one is an exact match too, or the filesystem
979 * one is absent/incompatible.
980 */
981 } else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
982 should_install_fs_fw(adap, card_fw_usable,
983 be32_to_cpu(fs_fw->fw_ver),
984 be32_to_cpu(card_fw->fw_ver))) {
985 ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
986 fw_size, 0);
987 if (ret != 0) {
988 dev_err(adap->pdev_dev,
989 "failed to install firmware: %d\n", ret);
990 goto bye;
991 }
992
993 /* Installed successfully, update the cached header too. */
994 memcpy(card_fw, fs_fw, sizeof(*card_fw));
995 card_fw_usable = 1;
996 *reset = 0; /* already reset as part of load_fw */
997 }
998
999 if (!card_fw_usable) {
1000 uint32_t d, c, k;
1001
1002 d = be32_to_cpu(drv_fw->fw_ver);
1003 c = be32_to_cpu(card_fw->fw_ver);
1004 k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
1005
1006 dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
1007 "chip state %d, "
1008 "driver compiled with %d.%d.%d.%d, "
1009 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
1010 state,
1011 FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d),
1012 FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d),
1013 FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
1014 FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c),
1015 FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
1016 FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
1017 ret = EINVAL;
1018 goto bye;
1019 }
1020
1021 /* We're using whatever's on the card and it's known to be good. */
1022 adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
1023 adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
1024
1025bye:
1026 return ret;
1027}
1028
964/** 1029/**
965 * t4_flash_erase_sectors - erase a range of flash sectors 1030 * t4_flash_erase_sectors - erase a range of flash sectors
966 * @adapter: the adapter 1031 * @adapter: the adapter
@@ -1368,7 +1433,7 @@ static void pcie_intr_handler(struct adapter *adapter)
1368 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, 1433 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1369 pcie_port_intr_info) + 1434 pcie_port_intr_info) +
1370 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, 1435 t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1371 is_t4(adapter->chip) ? 1436 is_t4(adapter->params.chip) ?
1372 pcie_intr_info : t5_pcie_intr_info); 1437 pcie_intr_info : t5_pcie_intr_info);
1373 1438
1374 if (fat) 1439 if (fat)
@@ -1782,7 +1847,7 @@ static void xgmac_intr_handler(struct adapter *adap, int port)
1782{ 1847{
1783 u32 v, int_cause_reg; 1848 u32 v, int_cause_reg;
1784 1849
1785 if (is_t4(adap->chip)) 1850 if (is_t4(adap->params.chip))
1786 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE); 1851 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
1787 else 1852 else
1788 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE); 1853 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
@@ -2250,7 +2315,7 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2250 2315
2251#define GET_STAT(name) \ 2316#define GET_STAT(name) \
2252 t4_read_reg64(adap, \ 2317 t4_read_reg64(adap, \
2253 (is_t4(adap->chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \ 2318 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
2254 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L))) 2319 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
2255#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) 2320#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2256 2321
@@ -2332,7 +2397,7 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2332{ 2397{
2333 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; 2398 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
2334 2399
2335 if (is_t4(adap->chip)) { 2400 if (is_t4(adap->params.chip)) {
2336 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO); 2401 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
2337 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI); 2402 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
2338 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); 2403 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
@@ -2374,7 +2439,7 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2374 int i; 2439 int i;
2375 u32 port_cfg_reg; 2440 u32 port_cfg_reg;
2376 2441
2377 if (is_t4(adap->chip)) 2442 if (is_t4(adap->params.chip))
2378 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); 2443 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2379 else 2444 else
2380 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2); 2445 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
@@ -2387,7 +2452,7 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2387 return -EINVAL; 2452 return -EINVAL;
2388 2453
2389#define EPIO_REG(name) \ 2454#define EPIO_REG(name) \
2390 (is_t4(adap->chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \ 2455 (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
2391 T5_PORT_REG(port, MAC_PORT_EPIO_##name)) 2456 T5_PORT_REG(port, MAC_PORT_EPIO_##name))
2392 2457
2393 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); 2458 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
@@ -2474,7 +2539,7 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2474int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len) 2539int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
2475{ 2540{
2476 int i, off; 2541 int i, off;
2477 u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn); 2542 u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
2478 2543
2479 /* Align on a 2KB boundary. 2544 /* Align on a 2KB boundary.
2480 */ 2545 */
@@ -3306,7 +3371,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
3306 int i, ret; 3371 int i, ret;
3307 struct fw_vi_mac_cmd c; 3372 struct fw_vi_mac_cmd c;
3308 struct fw_vi_mac_exact *p; 3373 struct fw_vi_mac_exact *p;
3309 unsigned int max_naddr = is_t4(adap->chip) ? 3374 unsigned int max_naddr = is_t4(adap->params.chip) ?
3310 NUM_MPS_CLS_SRAM_L_INSTANCES : 3375 NUM_MPS_CLS_SRAM_L_INSTANCES :
3311 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 3376 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
3312 3377
@@ -3368,7 +3433,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
3368 int ret, mode; 3433 int ret, mode;
3369 struct fw_vi_mac_cmd c; 3434 struct fw_vi_mac_cmd c;
3370 struct fw_vi_mac_exact *p = c.u.exact; 3435 struct fw_vi_mac_exact *p = c.u.exact;
3371 unsigned int max_mac_addr = is_t4(adap->chip) ? 3436 unsigned int max_mac_addr = is_t4(adap->params.chip) ?
3372 NUM_MPS_CLS_SRAM_L_INSTANCES : 3437 NUM_MPS_CLS_SRAM_L_INSTANCES :
3373 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 3438 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
3374 3439
@@ -3699,13 +3764,14 @@ int t4_prep_adapter(struct adapter *adapter)
3699{ 3764{
3700 int ret, ver; 3765 int ret, ver;
3701 uint16_t device_id; 3766 uint16_t device_id;
3767 u32 pl_rev;
3702 3768
3703 ret = t4_wait_dev_ready(adapter); 3769 ret = t4_wait_dev_ready(adapter);
3704 if (ret < 0) 3770 if (ret < 0)
3705 return ret; 3771 return ret;
3706 3772
3707 get_pci_mode(adapter, &adapter->params.pci); 3773 get_pci_mode(adapter, &adapter->params.pci);
3708 adapter->params.rev = t4_read_reg(adapter, PL_REV); 3774 pl_rev = G_REV(t4_read_reg(adapter, PL_REV));
3709 3775
3710 ret = get_flash_params(adapter); 3776 ret = get_flash_params(adapter);
3711 if (ret < 0) { 3777 if (ret < 0) {
@@ -3717,14 +3783,13 @@ int t4_prep_adapter(struct adapter *adapter)
3717 */ 3783 */
3718 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id); 3784 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
3719 ver = device_id >> 12; 3785 ver = device_id >> 12;
3786 adapter->params.chip = 0;
3720 switch (ver) { 3787 switch (ver) {
3721 case CHELSIO_T4: 3788 case CHELSIO_T4:
3722 adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 3789 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
3723 adapter->params.rev);
3724 break; 3790 break;
3725 case CHELSIO_T5: 3791 case CHELSIO_T5:
3726 adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, 3792 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
3727 adapter->params.rev);
3728 break; 3793 break;
3729 default: 3794 default:
3730 dev_err(adapter->pdev_dev, "Device %d is not supported\n", 3795 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
@@ -3732,9 +3797,6 @@ int t4_prep_adapter(struct adapter *adapter)
3732 return -EINVAL; 3797 return -EINVAL;
3733 } 3798 }
3734 3799
3735 /* Reassign the updated revision field */
3736 adapter->params.rev = adapter->chip;
3737
3738 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); 3800 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3739 3801
3740 /* 3802 /*
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index ef146c0ba481..0a8205d69d2c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1092,6 +1092,11 @@
1092 1092
1093#define PL_REV 0x1943c 1093#define PL_REV 0x1943c
1094 1094
1095#define S_REV 0
1096#define M_REV 0xfU
1097#define V_REV(x) ((x) << S_REV)
1098#define G_REV(x) (((x) >> S_REV) & M_REV)
1099
1095#define LE_DB_CONFIG 0x19c04 1100#define LE_DB_CONFIG 0x19c04
1096#define HASHEN 0x00100000U 1101#define HASHEN 0x00100000U
1097 1102
@@ -1199,4 +1204,13 @@
1199#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) 1204#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
1200#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) 1205#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
1201 1206
1207#define A_PL_VF_REV 0x4
1208#define A_PL_VF_WHOAMI 0x0
1209#define A_PL_VF_REVISION 0x8
1210
1211#define S_CHIPID 4
1212#define M_CHIPID 0xfU
1213#define V_CHIPID(x) ((x) << S_CHIPID)
1214#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID)
1215
1202#endif /* __T4_REGS_H */ 1216#endif /* __T4_REGS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 6f77ac487743..74fea74ce0aa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2157,7 +2157,7 @@ struct fw_debug_cmd {
2157 2157
2158struct fw_hdr { 2158struct fw_hdr {
2159 u8 ver; 2159 u8 ver;
2160 u8 reserved1; 2160 u8 chip; /* terminator chip type */
2161 __be16 len512; /* bin length in units of 512-bytes */ 2161 __be16 len512; /* bin length in units of 512-bytes */
2162 __be32 fw_ver; /* firmware version */ 2162 __be32 fw_ver; /* firmware version */
2163 __be32 tp_microcode_ver; 2163 __be32 tp_microcode_ver;
@@ -2176,6 +2176,11 @@ struct fw_hdr {
2176 __be32 reserved6[23]; 2176 __be32 reserved6[23];
2177}; 2177};
2178 2178
2179enum fw_hdr_chip {
2180 FW_HDR_CHIP_T4,
2181 FW_HDR_CHIP_T5
2182};
2183
2179#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) 2184#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff)
2180#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) 2185#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff)
2181#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) 2186#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index be5c7ef6ca93..68eaa9c88c7d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -344,7 +344,6 @@ struct adapter {
344 unsigned long registered_device_map; 344 unsigned long registered_device_map;
345 unsigned long open_device_map; 345 unsigned long open_device_map;
346 unsigned long flags; 346 unsigned long flags;
347 enum chip_type chip;
348 struct adapter_params params; 347 struct adapter_params params;
349 348
350 /* queue and interrupt resources */ 349 /* queue and interrupt resources */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 5f90ec5f7519..0899c0983594 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1064,7 +1064,7 @@ static inline unsigned int mk_adap_vers(const struct adapter *adapter)
1064 /* 1064 /*
1065 * Chip version 4, revision 0x3f (cxgb4vf). 1065 * Chip version 4, revision 0x3f (cxgb4vf).
1066 */ 1066 */
1067 return CHELSIO_CHIP_VERSION(adapter->chip) | (0x3f << 10); 1067 return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
1068} 1068}
1069 1069
1070/* 1070/*
@@ -1551,9 +1551,13 @@ static void cxgb4vf_get_regs(struct net_device *dev,
1551 reg_block_dump(adapter, regbuf, 1551 reg_block_dump(adapter, regbuf,
1552 T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST, 1552 T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
1553 T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST); 1553 T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
1554
1555 /* T5 adds new registers in the PL Register map.
1556 */
1554 reg_block_dump(adapter, regbuf, 1557 reg_block_dump(adapter, regbuf,
1555 T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST, 1558 T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
1556 T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_LAST); 1559 T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
1560 ? A_PL_VF_WHOAMI : A_PL_VF_REVISION));
1557 reg_block_dump(adapter, regbuf, 1561 reg_block_dump(adapter, regbuf,
1558 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST, 1562 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
1559 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST); 1563 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
@@ -2087,6 +2091,7 @@ static int adap_init0(struct adapter *adapter)
2087 unsigned int ethqsets; 2091 unsigned int ethqsets;
2088 int err; 2092 int err;
2089 u32 param, val = 0; 2093 u32 param, val = 0;
2094 unsigned int chipid;
2090 2095
2091 /* 2096 /*
2092 * Wait for the device to become ready before proceeding ... 2097 * Wait for the device to become ready before proceeding ...
@@ -2114,12 +2119,14 @@ static int adap_init0(struct adapter *adapter)
2114 return err; 2119 return err;
2115 } 2120 }
2116 2121
2122 adapter->params.chip = 0;
2117 switch (adapter->pdev->device >> 12) { 2123 switch (adapter->pdev->device >> 12) {
2118 case CHELSIO_T4: 2124 case CHELSIO_T4:
2119 adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0); 2125 adapter->params.chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
2120 break; 2126 break;
2121 case CHELSIO_T5: 2127 case CHELSIO_T5:
2122 adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, 0); 2128 chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV));
2129 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
2123 break; 2130 break;
2124 } 2131 }
2125 2132
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 8475c4cda9e4..0a89963c48ce 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -537,7 +537,7 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
537 */ 537 */
538 if (fl->pend_cred >= FL_PER_EQ_UNIT) { 538 if (fl->pend_cred >= FL_PER_EQ_UNIT) {
539 val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); 539 val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
540 if (!is_t4(adapter->chip)) 540 if (!is_t4(adapter->params.chip))
541 val |= DBTYPE(1); 541 val |= DBTYPE(1);
542 wmb(); 542 wmb();
543 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, 543 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 53cbfed21d0b..61362450d05b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -39,21 +39,28 @@
39#include "../cxgb4/t4fw_api.h" 39#include "../cxgb4/t4fw_api.h"
40 40
41#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) 41#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
42#define CHELSIO_CHIP_VERSION(code) ((code) >> 4) 42#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
43#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) 43#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
44 44
45/* All T4 and later chips have their PCI-E Device IDs encoded as 0xVFPP where:
46 *
47 * V = "4" for T4; "5" for T5, etc. or
48 * = "a" for T4 FPGA; "b" for T4 FPGA, etc.
49 * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs
50 * PP = adapter product designation
51 */
45#define CHELSIO_T4 0x4 52#define CHELSIO_T4 0x4
46#define CHELSIO_T5 0x5 53#define CHELSIO_T5 0x5
47 54
48enum chip_type { 55enum chip_type {
49 T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0), 56 T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
50 T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), 57 T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
51 T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
52 T4_FIRST_REV = T4_A1, 58 T4_FIRST_REV = T4_A1,
53 T4_LAST_REV = T4_A3, 59 T4_LAST_REV = T4_A2,
54 60
55 T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), 61 T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
56 T5_FIRST_REV = T5_A1, 62 T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
63 T5_FIRST_REV = T5_A0,
57 T5_LAST_REV = T5_A1, 64 T5_LAST_REV = T5_A1,
58}; 65};
59 66
@@ -203,6 +210,7 @@ struct adapter_params {
203 struct vpd_params vpd; /* Vital Product Data */ 210 struct vpd_params vpd; /* Vital Product Data */
204 struct rss_params rss; /* Receive Side Scaling */ 211 struct rss_params rss; /* Receive Side Scaling */
205 struct vf_resources vfres; /* Virtual Function Resource limits */ 212 struct vf_resources vfres; /* Virtual Function Resource limits */
213 enum chip_type chip; /* chip code */
206 u8 nports; /* # of Ethernet "ports" */ 214 u8 nports; /* # of Ethernet "ports" */
207}; 215};
208 216
@@ -253,7 +261,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
253 261
254static inline int is_t4(enum chip_type chip) 262static inline int is_t4(enum chip_type chip)
255{ 263{
256 return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV); 264 return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
257} 265}
258 266
259int t4vf_wait_dev_ready(struct adapter *); 267int t4vf_wait_dev_ready(struct adapter *);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 9f96dc3bb112..d958c44341b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -1027,7 +1027,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
1027 unsigned nfilters = 0; 1027 unsigned nfilters = 0;
1028 unsigned int rem = naddr; 1028 unsigned int rem = naddr;
1029 struct fw_vi_mac_cmd cmd, rpl; 1029 struct fw_vi_mac_cmd cmd, rpl;
1030 unsigned int max_naddr = is_t4(adapter->chip) ? 1030 unsigned int max_naddr = is_t4(adapter->params.chip) ?
1031 NUM_MPS_CLS_SRAM_L_INSTANCES : 1031 NUM_MPS_CLS_SRAM_L_INSTANCES :
1032 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 1032 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
1033 1033
@@ -1121,7 +1121,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
1121 struct fw_vi_mac_exact *p = &cmd.u.exact[0]; 1121 struct fw_vi_mac_exact *p = &cmd.u.exact[0];
1122 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, 1122 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1123 u.exact[1]), 16); 1123 u.exact[1]), 16);
1124 unsigned int max_naddr = is_t4(adapter->chip) ? 1124 unsigned int max_naddr = is_t4(adapter->params.chip) ?
1125 NUM_MPS_CLS_SRAM_L_INSTANCES : 1125 NUM_MPS_CLS_SRAM_L_INSTANCES :
1126 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 1126 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
1127 1127
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index f4825db5d179..5878df619b53 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -503,6 +503,7 @@ struct be_adapter {
503}; 503};
504 504
505#define be_physfn(adapter) (!adapter->virtfn) 505#define be_physfn(adapter) (!adapter->virtfn)
506#define be_virtfn(adapter) (adapter->virtfn)
506#define sriov_enabled(adapter) (adapter->num_vfs > 0) 507#define sriov_enabled(adapter) (adapter->num_vfs > 0)
507#define sriov_want(adapter) (be_physfn(adapter) && \ 508#define sriov_want(adapter) (be_physfn(adapter) && \
508 (num_vfs || pci_num_vf(adapter->pdev))) 509 (num_vfs || pci_num_vf(adapter->pdev)))
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 7fb0edfe3d24..e0e8bc1ef14c 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1032,6 +1032,13 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1032 } else { 1032 } else {
1033 req->hdr.version = 2; 1033 req->hdr.version = 2;
1034 req->page_size = 1; /* 1 for 4K */ 1034 req->page_size = 1; /* 1 for 4K */
1035
1036 /* coalesce-wm field in this cmd is not relevant to Lancer.
1037 * Lancer uses COMMON_MODIFY_CQ to set this field
1038 */
1039 if (!lancer_chip(adapter))
1040 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
1041 ctxt, coalesce_wm);
1035 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, 1042 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
1036 no_delay); 1043 no_delay);
1037 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, 1044 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
@@ -1758,7 +1765,7 @@ err:
1758 1765
1759/* Uses sycnhronous mcc */ 1766/* Uses sycnhronous mcc */
1760int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, 1767int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1761 u32 num, bool untagged, bool promiscuous) 1768 u32 num, bool promiscuous)
1762{ 1769{
1763 struct be_mcc_wrb *wrb; 1770 struct be_mcc_wrb *wrb;
1764 struct be_cmd_req_vlan_config *req; 1771 struct be_cmd_req_vlan_config *req;
@@ -1778,7 +1785,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1778 1785
1779 req->interface_id = if_id; 1786 req->interface_id = if_id;
1780 req->promiscuous = promiscuous; 1787 req->promiscuous = promiscuous;
1781 req->untagged = untagged; 1788 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
1782 req->num_vlan = num; 1789 req->num_vlan = num;
1783 if (!promiscuous) { 1790 if (!promiscuous) {
1784 memcpy(req->normal_vlan, vtag_array, 1791 memcpy(req->normal_vlan, vtag_array,
@@ -1847,7 +1854,19 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1847 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); 1854 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1848 } 1855 }
1849 1856
1857 if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
1858 req->if_flags_mask) {
1859 dev_warn(&adapter->pdev->dev,
1860 "Cannot set rx filter flags 0x%x\n",
1861 req->if_flags_mask);
1862 dev_warn(&adapter->pdev->dev,
1863 "Interface is capable of 0x%x flags only\n",
1864 be_if_cap_flags(adapter));
1865 }
1866 req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter));
1867
1850 status = be_mcc_notify_wait(adapter); 1868 status = be_mcc_notify_wait(adapter);
1869
1851err: 1870err:
1852 spin_unlock_bh(&adapter->mcc_lock); 1871 spin_unlock_bh(&adapter->mcc_lock);
1853 return status; 1872 return status;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index edf3e8a0ff83..0075686276aa 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1984,7 +1984,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
1984 char *fw_on_flash); 1984 char *fw_on_flash);
1985int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); 1985int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
1986int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, 1986int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1987 u32 num, bool untagged, bool promiscuous); 1987 u32 num, bool promiscuous);
1988int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); 1988int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
1989int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); 1989int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
1990int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); 1990int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 3e2162121601..dc88782185f2 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -64,6 +64,9 @@
64#define SLIPORT_ERROR_NO_RESOURCE1 0x2 64#define SLIPORT_ERROR_NO_RESOURCE1 0x2
65#define SLIPORT_ERROR_NO_RESOURCE2 0x9 65#define SLIPORT_ERROR_NO_RESOURCE2 0x9
66 66
67#define SLIPORT_ERROR_FW_RESET1 0x2
68#define SLIPORT_ERROR_FW_RESET2 0x0
69
67/********* Memory BAR register ************/ 70/********* Memory BAR register ************/
68#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc 71#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
69/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt 72/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index eaecaadfa8c5..0fde69d5cb6a 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1079,7 +1079,7 @@ static int be_vid_config(struct be_adapter *adapter)
1079 vids[num++] = cpu_to_le16(i); 1079 vids[num++] = cpu_to_le16(i);
1080 1080
1081 status = be_cmd_vlan_config(adapter, adapter->if_handle, 1081 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1082 vids, num, 1, 0); 1082 vids, num, 0);
1083 1083
1084 if (status) { 1084 if (status) {
1085 /* Set to VLAN promisc mode as setting VLAN filter failed */ 1085 /* Set to VLAN promisc mode as setting VLAN filter failed */
@@ -2464,8 +2464,16 @@ void be_detect_error(struct be_adapter *adapter)
2464 */ 2464 */
2465 if (sliport_status & SLIPORT_STATUS_ERR_MASK) { 2465 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2466 adapter->hw_error = true; 2466 adapter->hw_error = true;
2467 dev_err(&adapter->pdev->dev, 2467 /* Do not log error messages if its a FW reset */
2468 "Error detected in the card\n"); 2468 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2469 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2470 dev_info(&adapter->pdev->dev,
2471 "Firmware update in progress\n");
2472 return;
2473 } else {
2474 dev_err(&adapter->pdev->dev,
2475 "Error detected in the card\n");
2476 }
2469 } 2477 }
2470 2478
2471 if (sliport_status & SLIPORT_STATUS_ERR_MASK) { 2479 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
@@ -2658,8 +2666,8 @@ static int be_close(struct net_device *netdev)
2658 2666
2659 be_roce_dev_close(adapter); 2667 be_roce_dev_close(adapter);
2660 2668
2661 for_all_evt_queues(adapter, eqo, i) { 2669 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2662 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { 2670 for_all_evt_queues(adapter, eqo, i) {
2663 napi_disable(&eqo->napi); 2671 napi_disable(&eqo->napi);
2664 be_disable_busy_poll(eqo); 2672 be_disable_busy_poll(eqo);
2665 } 2673 }
@@ -2676,6 +2684,11 @@ static int be_close(struct net_device *netdev)
2676 2684
2677 be_rx_qs_destroy(adapter); 2685 be_rx_qs_destroy(adapter);
2678 2686
2687 for (i = 1; i < (adapter->uc_macs + 1); i++)
2688 be_cmd_pmac_del(adapter, adapter->if_handle,
2689 adapter->pmac_id[i], 0);
2690 adapter->uc_macs = 0;
2691
2679 for_all_evt_queues(adapter, eqo, i) { 2692 for_all_evt_queues(adapter, eqo, i) {
2680 if (msix_enabled(adapter)) 2693 if (msix_enabled(adapter))
2681 synchronize_irq(be_msix_vec_get(adapter, eqo)); 2694 synchronize_irq(be_msix_vec_get(adapter, eqo));
@@ -2927,28 +2940,35 @@ static void be_cancel_worker(struct be_adapter *adapter)
2927 } 2940 }
2928} 2941}
2929 2942
2930static int be_clear(struct be_adapter *adapter) 2943static void be_mac_clear(struct be_adapter *adapter)
2931{ 2944{
2932 int i; 2945 int i;
2933 2946
2947 if (adapter->pmac_id) {
2948 for (i = 0; i < (adapter->uc_macs + 1); i++)
2949 be_cmd_pmac_del(adapter, adapter->if_handle,
2950 adapter->pmac_id[i], 0);
2951 adapter->uc_macs = 0;
2952
2953 kfree(adapter->pmac_id);
2954 adapter->pmac_id = NULL;
2955 }
2956}
2957
2958static int be_clear(struct be_adapter *adapter)
2959{
2934 be_cancel_worker(adapter); 2960 be_cancel_worker(adapter);
2935 2961
2936 if (sriov_enabled(adapter)) 2962 if (sriov_enabled(adapter))
2937 be_vf_clear(adapter); 2963 be_vf_clear(adapter);
2938 2964
2939 /* delete the primary mac along with the uc-mac list */ 2965 /* delete the primary mac along with the uc-mac list */
2940 for (i = 0; i < (adapter->uc_macs + 1); i++) 2966 be_mac_clear(adapter);
2941 be_cmd_pmac_del(adapter, adapter->if_handle,
2942 adapter->pmac_id[i], 0);
2943 adapter->uc_macs = 0;
2944 2967
2945 be_cmd_if_destroy(adapter, adapter->if_handle, 0); 2968 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2946 2969
2947 be_clear_queues(adapter); 2970 be_clear_queues(adapter);
2948 2971
2949 kfree(adapter->pmac_id);
2950 adapter->pmac_id = NULL;
2951
2952 be_msix_disable(adapter); 2972 be_msix_disable(adapter);
2953 return 0; 2973 return 0;
2954} 2974}
@@ -3248,12 +3268,10 @@ static int be_mac_setup(struct be_adapter *adapter)
3248 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN); 3268 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3249 } 3269 }
3250 3270
3251 /* On BE3 VFs this cmd may fail due to lack of privilege. 3271 /* For BE3-R VFs, the PF programs the initial MAC address */
3252 * Ignore the failure as in this case pmac_id is fetched 3272 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3253 * in the IFACE_CREATE cmd. 3273 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3254 */ 3274 &adapter->pmac_id[0], 0);
3255 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3256 &adapter->pmac_id[0], 0);
3257 return 0; 3275 return 0;
3258} 3276}
3259 3277
@@ -3809,6 +3827,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
3809 } 3827 }
3810 3828
3811 if (change_status == LANCER_FW_RESET_NEEDED) { 3829 if (change_status == LANCER_FW_RESET_NEEDED) {
3830 dev_info(&adapter->pdev->dev,
3831 "Resetting adapter to activate new FW\n");
3812 status = lancer_physdev_ctrl(adapter, 3832 status = lancer_physdev_ctrl(adapter,
3813 PHYSDEV_CONTROL_FW_RESET_MASK); 3833 PHYSDEV_CONTROL_FW_RESET_MASK);
3814 if (status) { 3834 if (status) {
@@ -4360,13 +4380,13 @@ static int lancer_recover_func(struct be_adapter *adapter)
4360 goto err; 4380 goto err;
4361 } 4381 }
4362 4382
4363 dev_err(dev, "Error recovery successful\n"); 4383 dev_err(dev, "Adapter recovery successful\n");
4364 return 0; 4384 return 0;
4365err: 4385err:
4366 if (status == -EAGAIN) 4386 if (status == -EAGAIN)
4367 dev_err(dev, "Waiting for resource provisioning\n"); 4387 dev_err(dev, "Waiting for resource provisioning\n");
4368 else 4388 else
4369 dev_err(dev, "Error recovery failed\n"); 4389 dev_err(dev, "Adapter recovery failed\n");
4370 4390
4371 return status; 4391 return status;
4372} 4392}
@@ -4594,6 +4614,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4594 if (adapter->wol) 4614 if (adapter->wol)
4595 be_setup_wol(adapter, true); 4615 be_setup_wol(adapter, true);
4596 4616
4617 be_intr_set(adapter, false);
4597 cancel_delayed_work_sync(&adapter->func_recovery_work); 4618 cancel_delayed_work_sync(&adapter->func_recovery_work);
4598 4619
4599 netif_device_detach(netdev); 4620 netif_device_detach(netdev);
@@ -4629,6 +4650,7 @@ static int be_resume(struct pci_dev *pdev)
4629 if (status) 4650 if (status)
4630 return status; 4651 return status;
4631 4652
4653 be_intr_set(adapter, true);
4632 /* tell fw we're ready to fire cmds */ 4654 /* tell fw we're ready to fire cmds */
4633 status = be_cmd_fw_init(adapter); 4655 status = be_cmd_fw_init(adapter);
4634 if (status) 4656 if (status)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index b2793b91cc55..e7c8b749c5a5 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -98,10 +98,6 @@ static void set_multicast_list(struct net_device *ndev);
98 * detected as not set during a prior frame transmission, then the 98 * detected as not set during a prior frame transmission, then the
99 * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs 99 * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
100 * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in 100 * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
101 * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
102 * detected as not set during a prior frame transmission, then the
103 * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
104 * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
105 * frames not being transmitted until there is a 0-to-1 transition on 101 * frames not being transmitted until there is a 0-to-1 transition on
106 * ENET_TDAR[TDAR]. 102 * ENET_TDAR[TDAR].
107 */ 103 */
@@ -385,8 +381,15 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
385 * data. 381 * data.
386 */ 382 */
387 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, 383 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
388 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); 384 skb->len, DMA_TO_DEVICE);
389 385 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
386 bdp->cbd_bufaddr = 0;
387 fep->tx_skbuff[index] = NULL;
388 dev_kfree_skb_any(skb);
389 if (net_ratelimit())
390 netdev_err(ndev, "Tx DMA memory map failed\n");
391 return NETDEV_TX_OK;
392 }
390 /* Send it on its way. Tell FEC it's ready, interrupt when done, 393 /* Send it on its way. Tell FEC it's ready, interrupt when done,
391 * it's the last BD of the frame, and to put the CRC on the end. 394 * it's the last BD of the frame, and to put the CRC on the end.
392 */ 395 */
@@ -772,11 +775,10 @@ fec_enet_tx(struct net_device *ndev)
772 else 775 else
773 index = bdp - fep->tx_bd_base; 776 index = bdp - fep->tx_bd_base;
774 777
775 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
776 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
777 bdp->cbd_bufaddr = 0;
778
779 skb = fep->tx_skbuff[index]; 778 skb = fep->tx_skbuff[index];
779 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len,
780 DMA_TO_DEVICE);
781 bdp->cbd_bufaddr = 0;
780 782
781 /* Check for errors. */ 783 /* Check for errors. */
782 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 784 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -861,6 +863,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
861 struct bufdesc_ex *ebdp = NULL; 863 struct bufdesc_ex *ebdp = NULL;
862 bool vlan_packet_rcvd = false; 864 bool vlan_packet_rcvd = false;
863 u16 vlan_tag; 865 u16 vlan_tag;
866 int index = 0;
864 867
865#ifdef CONFIG_M532x 868#ifdef CONFIG_M532x
866 flush_cache_all(); 869 flush_cache_all();
@@ -916,10 +919,15 @@ fec_enet_rx(struct net_device *ndev, int budget)
916 ndev->stats.rx_packets++; 919 ndev->stats.rx_packets++;
917 pkt_len = bdp->cbd_datlen; 920 pkt_len = bdp->cbd_datlen;
918 ndev->stats.rx_bytes += pkt_len; 921 ndev->stats.rx_bytes += pkt_len;
919 data = (__u8*)__va(bdp->cbd_bufaddr);
920 922
921 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, 923 if (fep->bufdesc_ex)
922 FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE); 924 index = (struct bufdesc_ex *)bdp -
925 (struct bufdesc_ex *)fep->rx_bd_base;
926 else
927 index = bdp - fep->rx_bd_base;
928 data = fep->rx_skbuff[index]->data;
929 dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
930 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
923 931
924 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) 932 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
925 swap_buffer(data, pkt_len); 933 swap_buffer(data, pkt_len);
@@ -999,8 +1007,8 @@ fec_enet_rx(struct net_device *ndev, int budget)
999 napi_gro_receive(&fep->napi, skb); 1007 napi_gro_receive(&fep->napi, skb);
1000 } 1008 }
1001 1009
1002 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, 1010 dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
1003 FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE); 1011 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
1004rx_processing_done: 1012rx_processing_done:
1005 /* Clear the status flags for this buffer */ 1013 /* Clear the status flags for this buffer */
1006 status &= ~BD_ENET_RX_STATS; 1014 status &= ~BD_ENET_RX_STATS;
@@ -1719,6 +1727,12 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
1719 1727
1720 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, 1728 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
1721 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); 1729 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
1730 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
1731 fec_enet_free_buffers(ndev);
1732 if (net_ratelimit())
1733 netdev_err(ndev, "Rx DMA memory map failed\n");
1734 return -ENOMEM;
1735 }
1722 bdp->cbd_sc = BD_ENET_RX_EMPTY; 1736 bdp->cbd_sc = BD_ENET_RX_EMPTY;
1723 1737
1724 if (fep->bufdesc_ex) { 1738 if (fep->bufdesc_ex) {
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 2d1c6bdd3618..7628e0fd8455 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3033,7 +3033,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3033 3033
3034 dev->hw_features = NETIF_F_SG | NETIF_F_TSO | 3034 dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
3035 NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX; 3035 NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
3036 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO | 3036 dev->features = NETIF_F_SG | NETIF_F_TSO |
3037 NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | 3037 NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
3038 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 3038 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
3039 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM; 3039 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 58c147271a36..f9313b36c887 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -83,6 +83,11 @@ struct e1000_adapter;
83 83
84#define E1000_MAX_INTR 10 84#define E1000_MAX_INTR 10
85 85
86/*
87 * Count for polling __E1000_RESET condition every 10-20msec.
88 */
89#define E1000_CHECK_RESET_COUNT 50
90
86/* TX/RX descriptor defines */ 91/* TX/RX descriptor defines */
87#define E1000_DEFAULT_TXD 256 92#define E1000_DEFAULT_TXD 256
88#define E1000_MAX_TXD 256 93#define E1000_MAX_TXD 256
@@ -312,8 +317,6 @@ struct e1000_adapter {
312 struct delayed_work watchdog_task; 317 struct delayed_work watchdog_task;
313 struct delayed_work fifo_stall_task; 318 struct delayed_work fifo_stall_task;
314 struct delayed_work phy_info_task; 319 struct delayed_work phy_info_task;
315
316 struct mutex mutex;
317}; 320};
318 321
319enum e1000_state_t { 322enum e1000_state_t {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index e38622825fa7..46e6544ed1b7 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -494,13 +494,20 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter)
494{ 494{
495 set_bit(__E1000_DOWN, &adapter->flags); 495 set_bit(__E1000_DOWN, &adapter->flags);
496 496
497 /* Only kill reset task if adapter is not resetting */
498 if (!test_bit(__E1000_RESETTING, &adapter->flags))
499 cancel_work_sync(&adapter->reset_task);
500
501 cancel_delayed_work_sync(&adapter->watchdog_task); 497 cancel_delayed_work_sync(&adapter->watchdog_task);
498
499 /*
500 * Since the watchdog task can reschedule other tasks, we should cancel
501 * it first, otherwise we can run into the situation when a work is
502 * still running after the adapter has been turned down.
503 */
504
502 cancel_delayed_work_sync(&adapter->phy_info_task); 505 cancel_delayed_work_sync(&adapter->phy_info_task);
503 cancel_delayed_work_sync(&adapter->fifo_stall_task); 506 cancel_delayed_work_sync(&adapter->fifo_stall_task);
507
508 /* Only kill reset task if adapter is not resetting */
509 if (!test_bit(__E1000_RESETTING, &adapter->flags))
510 cancel_work_sync(&adapter->reset_task);
504} 511}
505 512
506void e1000_down(struct e1000_adapter *adapter) 513void e1000_down(struct e1000_adapter *adapter)
@@ -544,21 +551,8 @@ void e1000_down(struct e1000_adapter *adapter)
544 e1000_clean_all_rx_rings(adapter); 551 e1000_clean_all_rx_rings(adapter);
545} 552}
546 553
547static void e1000_reinit_safe(struct e1000_adapter *adapter)
548{
549 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
550 msleep(1);
551 mutex_lock(&adapter->mutex);
552 e1000_down(adapter);
553 e1000_up(adapter);
554 mutex_unlock(&adapter->mutex);
555 clear_bit(__E1000_RESETTING, &adapter->flags);
556}
557
558void e1000_reinit_locked(struct e1000_adapter *adapter) 554void e1000_reinit_locked(struct e1000_adapter *adapter)
559{ 555{
560 /* if rtnl_lock is not held the call path is bogus */
561 ASSERT_RTNL();
562 WARN_ON(in_interrupt()); 556 WARN_ON(in_interrupt());
563 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 557 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
564 msleep(1); 558 msleep(1);
@@ -1316,7 +1310,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
1316 e1000_irq_disable(adapter); 1310 e1000_irq_disable(adapter);
1317 1311
1318 spin_lock_init(&adapter->stats_lock); 1312 spin_lock_init(&adapter->stats_lock);
1319 mutex_init(&adapter->mutex);
1320 1313
1321 set_bit(__E1000_DOWN, &adapter->flags); 1314 set_bit(__E1000_DOWN, &adapter->flags);
1322 1315
@@ -1440,6 +1433,10 @@ static int e1000_close(struct net_device *netdev)
1440{ 1433{
1441 struct e1000_adapter *adapter = netdev_priv(netdev); 1434 struct e1000_adapter *adapter = netdev_priv(netdev);
1442 struct e1000_hw *hw = &adapter->hw; 1435 struct e1000_hw *hw = &adapter->hw;
1436 int count = E1000_CHECK_RESET_COUNT;
1437
1438 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
1439 usleep_range(10000, 20000);
1443 1440
1444 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); 1441 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1445 e1000_down(adapter); 1442 e1000_down(adapter);
@@ -2325,11 +2322,8 @@ static void e1000_update_phy_info_task(struct work_struct *work)
2325 struct e1000_adapter *adapter = container_of(work, 2322 struct e1000_adapter *adapter = container_of(work,
2326 struct e1000_adapter, 2323 struct e1000_adapter,
2327 phy_info_task.work); 2324 phy_info_task.work);
2328 if (test_bit(__E1000_DOWN, &adapter->flags)) 2325
2329 return;
2330 mutex_lock(&adapter->mutex);
2331 e1000_phy_get_info(&adapter->hw, &adapter->phy_info); 2326 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2332 mutex_unlock(&adapter->mutex);
2333} 2327}
2334 2328
2335/** 2329/**
@@ -2345,9 +2339,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2345 struct net_device *netdev = adapter->netdev; 2339 struct net_device *netdev = adapter->netdev;
2346 u32 tctl; 2340 u32 tctl;
2347 2341
2348 if (test_bit(__E1000_DOWN, &adapter->flags))
2349 return;
2350 mutex_lock(&adapter->mutex);
2351 if (atomic_read(&adapter->tx_fifo_stall)) { 2342 if (atomic_read(&adapter->tx_fifo_stall)) {
2352 if ((er32(TDT) == er32(TDH)) && 2343 if ((er32(TDT) == er32(TDH)) &&
2353 (er32(TDFT) == er32(TDFH)) && 2344 (er32(TDFT) == er32(TDFH)) &&
@@ -2368,7 +2359,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2368 schedule_delayed_work(&adapter->fifo_stall_task, 1); 2359 schedule_delayed_work(&adapter->fifo_stall_task, 1);
2369 } 2360 }
2370 } 2361 }
2371 mutex_unlock(&adapter->mutex);
2372} 2362}
2373 2363
2374bool e1000_has_link(struct e1000_adapter *adapter) 2364bool e1000_has_link(struct e1000_adapter *adapter)
@@ -2422,10 +2412,6 @@ static void e1000_watchdog(struct work_struct *work)
2422 struct e1000_tx_ring *txdr = adapter->tx_ring; 2412 struct e1000_tx_ring *txdr = adapter->tx_ring;
2423 u32 link, tctl; 2413 u32 link, tctl;
2424 2414
2425 if (test_bit(__E1000_DOWN, &adapter->flags))
2426 return;
2427
2428 mutex_lock(&adapter->mutex);
2429 link = e1000_has_link(adapter); 2415 link = e1000_has_link(adapter);
2430 if ((netif_carrier_ok(netdev)) && link) 2416 if ((netif_carrier_ok(netdev)) && link)
2431 goto link_up; 2417 goto link_up;
@@ -2516,7 +2502,7 @@ link_up:
2516 adapter->tx_timeout_count++; 2502 adapter->tx_timeout_count++;
2517 schedule_work(&adapter->reset_task); 2503 schedule_work(&adapter->reset_task);
2518 /* exit immediately since reset is imminent */ 2504 /* exit immediately since reset is imminent */
2519 goto unlock; 2505 return;
2520 } 2506 }
2521 } 2507 }
2522 2508
@@ -2544,9 +2530,6 @@ link_up:
2544 /* Reschedule the task */ 2530 /* Reschedule the task */
2545 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2531 if (!test_bit(__E1000_DOWN, &adapter->flags))
2546 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); 2532 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2547
2548unlock:
2549 mutex_unlock(&adapter->mutex);
2550} 2533}
2551 2534
2552enum latency_range { 2535enum latency_range {
@@ -3495,10 +3478,8 @@ static void e1000_reset_task(struct work_struct *work)
3495 struct e1000_adapter *adapter = 3478 struct e1000_adapter *adapter =
3496 container_of(work, struct e1000_adapter, reset_task); 3479 container_of(work, struct e1000_adapter, reset_task);
3497 3480
3498 if (test_bit(__E1000_DOWN, &adapter->flags))
3499 return;
3500 e_err(drv, "Reset adapter\n"); 3481 e_err(drv, "Reset adapter\n");
3501 e1000_reinit_safe(adapter); 3482 e1000_reinit_locked(adapter);
3502} 3483}
3503 3484
3504/** 3485/**
@@ -4963,6 +4944,11 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4963 netif_device_detach(netdev); 4944 netif_device_detach(netdev);
4964 4945
4965 if (netif_running(netdev)) { 4946 if (netif_running(netdev)) {
4947 int count = E1000_CHECK_RESET_COUNT;
4948
4949 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
4950 usleep_range(10000, 20000);
4951
4966 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); 4952 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
4967 e1000_down(adapter); 4953 e1000_down(adapter);
4968 } 4954 }
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index aedd5736a87d..8d3945ab7334 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3482,10 +3482,10 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
3482 * specified. Matching the kind of event packet is not supported, with the 3482 * specified. Matching the kind of event packet is not supported, with the
3483 * exception of "all V2 events regardless of level 2 or 4". 3483 * exception of "all V2 events regardless of level 2 or 4".
3484 **/ 3484 **/
3485static int e1000e_config_hwtstamp(struct e1000_adapter *adapter) 3485static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
3486 struct hwtstamp_config *config)
3486{ 3487{
3487 struct e1000_hw *hw = &adapter->hw; 3488 struct e1000_hw *hw = &adapter->hw;
3488 struct hwtstamp_config *config = &adapter->hwtstamp_config;
3489 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; 3489 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
3490 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; 3490 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
3491 u32 rxmtrl = 0; 3491 u32 rxmtrl = 0;
@@ -3586,6 +3586,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter)
3586 return -ERANGE; 3586 return -ERANGE;
3587 } 3587 }
3588 3588
3589 adapter->hwtstamp_config = *config;
3590
3589 /* enable/disable Tx h/w time stamping */ 3591 /* enable/disable Tx h/w time stamping */
3590 regval = er32(TSYNCTXCTL); 3592 regval = er32(TSYNCTXCTL);
3591 regval &= ~E1000_TSYNCTXCTL_ENABLED; 3593 regval &= ~E1000_TSYNCTXCTL_ENABLED;
@@ -3874,7 +3876,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3874 e1000e_reset_adaptive(hw); 3876 e1000e_reset_adaptive(hw);
3875 3877
3876 /* initialize systim and reset the ns time counter */ 3878 /* initialize systim and reset the ns time counter */
3877 e1000e_config_hwtstamp(adapter); 3879 e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
3878 3880
3879 /* Set EEE advertisement as appropriate */ 3881 /* Set EEE advertisement as appropriate */
3880 if (adapter->flags2 & FLAG2_HAS_EEE) { 3882 if (adapter->flags2 & FLAG2_HAS_EEE) {
@@ -5797,14 +5799,10 @@ static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
5797 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 5799 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
5798 return -EFAULT; 5800 return -EFAULT;
5799 5801
5800 adapter->hwtstamp_config = config; 5802 ret_val = e1000e_config_hwtstamp(adapter, &config);
5801
5802 ret_val = e1000e_config_hwtstamp(adapter);
5803 if (ret_val) 5803 if (ret_val)
5804 return ret_val; 5804 return ret_val;
5805 5805
5806 config = adapter->hwtstamp_config;
5807
5808 switch (config.rx_filter) { 5806 switch (config.rx_filter) {
5809 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 5807 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
5810 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 5808 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index be15938ba213..12b0932204ba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -354,6 +354,9 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
354 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); 354 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
355 int i; 355 int i;
356 356
357 if (!vsi->tx_rings)
358 return stats;
359
357 rcu_read_lock(); 360 rcu_read_lock();
358 for (i = 0; i < vsi->num_queue_pairs; i++) { 361 for (i = 0; i < vsi->num_queue_pairs; i++) {
359 struct i40e_ring *tx_ring, *rx_ring; 362 struct i40e_ring *tx_ring, *rx_ring;
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index c4c4fe332c7e..ad2b74d95138 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1728,7 +1728,10 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
1728 * ownership of the resources, wait and try again to 1728 * ownership of the resources, wait and try again to
1729 * see if they have relinquished the resources yet. 1729 * see if they have relinquished the resources yet.
1730 */ 1730 */
1731 udelay(usec_interval); 1731 if (usec_interval >= 1000)
1732 mdelay(usec_interval/1000);
1733 else
1734 udelay(usec_interval);
1732 } 1735 }
1733 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); 1736 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
1734 if (ret_val) 1737 if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index b0f3666b1d7f..c3143da497c8 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2062,14 +2062,15 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2062{ 2062{
2063 struct igb_adapter *adapter = netdev_priv(netdev); 2063 struct igb_adapter *adapter = netdev_priv(netdev);
2064 2064
2065 wol->supported = WAKE_UCAST | WAKE_MCAST |
2066 WAKE_BCAST | WAKE_MAGIC |
2067 WAKE_PHY;
2068 wol->wolopts = 0; 2065 wol->wolopts = 0;
2069 2066
2070 if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) 2067 if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
2071 return; 2068 return;
2072 2069
2070 wol->supported = WAKE_UCAST | WAKE_MCAST |
2071 WAKE_BCAST | WAKE_MAGIC |
2072 WAKE_PHY;
2073
2073 /* apply any specific unsupported masks here */ 2074 /* apply any specific unsupported masks here */
2074 switch (adapter->hw.device_id) { 2075 switch (adapter->hw.device_id) {
2075 default: 2076 default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0c55079ebee3..cc06854296a3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4251,8 +4251,8 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
4251 rx_ring->l2_accel_priv = NULL; 4251 rx_ring->l2_accel_priv = NULL;
4252} 4252}
4253 4253
4254int ixgbe_fwd_ring_down(struct net_device *vdev, 4254static int ixgbe_fwd_ring_down(struct net_device *vdev,
4255 struct ixgbe_fwd_adapter *accel) 4255 struct ixgbe_fwd_adapter *accel)
4256{ 4256{
4257 struct ixgbe_adapter *adapter = accel->real_adapter; 4257 struct ixgbe_adapter *adapter = accel->real_adapter;
4258 unsigned int rxbase = accel->rx_base_queue; 4258 unsigned int rxbase = accel->rx_base_queue;
@@ -7986,10 +7986,9 @@ skip_sriov:
7986 NETIF_F_TSO | 7986 NETIF_F_TSO |
7987 NETIF_F_TSO6 | 7987 NETIF_F_TSO6 |
7988 NETIF_F_RXHASH | 7988 NETIF_F_RXHASH |
7989 NETIF_F_RXCSUM | 7989 NETIF_F_RXCSUM;
7990 NETIF_F_HW_L2FW_DOFFLOAD;
7991 7990
7992 netdev->hw_features = netdev->features; 7991 netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
7993 7992
7994 switch (adapter->hw.mac.type) { 7993 switch (adapter->hw.mac.type) {
7995 case ixgbe_mac_82599EB: 7994 case ixgbe_mac_82599EB:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index e4c676006be9..39217e5ff7dc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -46,6 +46,7 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl);
46static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); 46static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
47static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); 47static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
48static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); 48static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
49static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
49 50
50/** 51/**
51 * ixgbe_identify_phy_generic - Get physical layer module 52 * ixgbe_identify_phy_generic - Get physical layer module
@@ -1164,7 +1165,7 @@ err_read_i2c_eeprom:
1164 * 1165 *
1165 * Searches for and identifies the QSFP module and assigns appropriate PHY type 1166 * Searches for and identifies the QSFP module and assigns appropriate PHY type
1166 **/ 1167 **/
1167s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) 1168static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
1168{ 1169{
1169 struct ixgbe_adapter *adapter = hw->back; 1170 struct ixgbe_adapter *adapter = hw->back;
1170 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 1171 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index aae900a256da..fffcbdd2bf0e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -145,7 +145,6 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
145s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); 145s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
146s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); 146s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
147s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); 147s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
148s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
149s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 148s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
150 u16 *list_offset, 149 u16 *list_offset,
151 u16 *data_offset); 150 u16 *data_offset);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 00cd36e08601..61088a6a9424 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2890,7 +2890,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2890 PHY_INTERFACE_MODE_GMII); 2890 PHY_INTERFACE_MODE_GMII);
2891 if (!mp->phy) 2891 if (!mp->phy)
2892 err = -ENODEV; 2892 err = -ENODEV;
2893 phy_addr_set(mp, mp->phy->addr); 2893 else
2894 phy_addr_set(mp, mp->phy->addr);
2894 } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { 2895 } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
2895 mp->phy = phy_scan(mp, pd->phy_addr); 2896 mp->phy = phy_scan(mp, pd->phy_addr);
2896 2897
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b8e232b4ea2d..d5f0d72e5e33 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1378,7 +1378,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1378 1378
1379 dev_kfree_skb_any(skb); 1379 dev_kfree_skb_any(skb);
1380 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, 1380 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1381 rx_desc->data_size, DMA_FROM_DEVICE); 1381 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
1382 } 1382 }
1383 1383
1384 if (rx_done) 1384 if (rx_done)
@@ -1424,7 +1424,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1424 } 1424 }
1425 1425
1426 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, 1426 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1427 rx_desc->data_size, DMA_FROM_DEVICE); 1427 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
1428 1428
1429 rx_bytes = rx_desc->data_size - 1429 rx_bytes = rx_desc->data_size -
1430 (ETH_FCS_LEN + MVNETA_MH_SIZE); 1430 (ETH_FCS_LEN + MVNETA_MH_SIZE);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 40626690e8a8..c11d063473e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -140,7 +140,6 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
140{ 140{
141 struct mlx4_en_priv *priv = netdev_priv(dev); 141 struct mlx4_en_priv *priv = netdev_priv(dev);
142 struct mlx4_en_dev *mdev = priv->mdev; 142 struct mlx4_en_dev *mdev = priv->mdev;
143 struct mlx4_en_tx_ring *tx_ring;
144 int i, carrier_ok; 143 int i, carrier_ok;
145 144
146 memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST); 145 memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
@@ -150,16 +149,10 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
150 carrier_ok = netif_carrier_ok(dev); 149 carrier_ok = netif_carrier_ok(dev);
151 150
152 netif_carrier_off(dev); 151 netif_carrier_off(dev);
153retry_tx:
154 /* Wait until all tx queues are empty. 152 /* Wait until all tx queues are empty.
155 * there should not be any additional incoming traffic 153 * there should not be any additional incoming traffic
156 * since we turned the carrier off */ 154 * since we turned the carrier off */
157 msleep(200); 155 msleep(200);
158 for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
159 tx_ring = priv->tx_ring[i];
160 if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
161 goto retry_tx;
162 }
163 156
164 if (priv->mdev->dev->caps.flags & 157 if (priv->mdev->dev->caps.flags &
165 MLX4_DEV_CAP_FLAG_UC_LOOPBACK) { 158 MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 5789ea2c934d..01fc6515384d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2635,6 +2635,8 @@ static int __init mlx4_init(void)
2635 return -ENOMEM; 2635 return -ENOMEM;
2636 2636
2637 ret = pci_register_driver(&mlx4_driver); 2637 ret = pci_register_driver(&mlx4_driver);
2638 if (ret < 0)
2639 destroy_workqueue(mlx4_wq);
2638 return ret < 0 ? ret : 0; 2640 return ret < 0 ? ret : 0;
2639} 2641}
2640 2642
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 0951f7aca1ef..822616e3c375 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -459,8 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
459 sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; 459 sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
460 460
461 ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, 461 ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
462 &ctl->sg, 1, DMA_MEM_TO_DEV, 462 &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
463 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
464 if (!ctl->adesc) 463 if (!ctl->adesc)
465 return NETDEV_TX_BUSY; 464 return NETDEV_TX_BUSY;
466 465
@@ -571,8 +570,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev)
571 sg_dma_len(sg) = DMA_BUFFER_SIZE; 570 sg_dma_len(sg) = DMA_BUFFER_SIZE;
572 571
573 ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, 572 ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
574 sg, 1, DMA_DEV_TO_MEM, 573 sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
575 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
576 574
577 if (!ctl->adesc) 575 if (!ctl->adesc)
578 goto out; 576 goto out;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 2d045be4b5cf..1e8b9514718b 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -5150,8 +5150,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
5150{ 5150{
5151 struct fe_priv *np = netdev_priv(dev); 5151 struct fe_priv *np = netdev_priv(dev);
5152 u8 __iomem *base = get_hwbase(dev); 5152 u8 __iomem *base = get_hwbase(dev);
5153 int result; 5153 int result, count;
5154 memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64)); 5154
5155 count = nv_get_sset_count(dev, ETH_SS_TEST);
5156 memset(buffer, 0, count * sizeof(u64));
5155 5157
5156 if (!nv_link_test(dev)) { 5158 if (!nv_link_test(dev)) {
5157 test->flags |= ETH_TEST_FL_FAILED; 5159 test->flags |= ETH_TEST_FL_FAILED;
@@ -5195,7 +5197,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
5195 return; 5197 return;
5196 } 5198 }
5197 5199
5198 if (!nv_loopback_test(dev)) { 5200 if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) {
5199 test->flags |= ETH_TEST_FL_FAILED; 5201 test->flags |= ETH_TEST_FL_FAILED;
5200 buffer[3] = 1; 5202 buffer[3] = 1;
5201 } 5203 }
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 5a0f04c2c813..27ffe0ebf0a6 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -245,16 +245,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
245 /* Get ieee1588's dev information */ 245 /* Get ieee1588's dev information */
246 pdev = adapter->ptp_pdev; 246 pdev = adapter->ptp_pdev;
247 247
248 switch (cfg.tx_type) { 248 if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
249 case HWTSTAMP_TX_OFF:
250 adapter->hwts_tx_en = 0;
251 break;
252 case HWTSTAMP_TX_ON:
253 adapter->hwts_tx_en = 1;
254 break;
255 default:
256 return -ERANGE; 249 return -ERANGE;
257 }
258 250
259 switch (cfg.rx_filter) { 251 switch (cfg.rx_filter) {
260 case HWTSTAMP_FILTER_NONE: 252 case HWTSTAMP_FILTER_NONE:
@@ -284,6 +276,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
284 return -ERANGE; 276 return -ERANGE;
285 } 277 }
286 278
279 adapter->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;
280
287 /* Clear out any old time stamps. */ 281 /* Clear out any old time stamps. */
288 pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED); 282 pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);
289 283
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 0c9c4e895595..03517478e589 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
18 */ 18 */
19#define DRV_NAME "qlge" 19#define DRV_NAME "qlge"
20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " 20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
21#define DRV_VERSION "1.00.00.33" 21#define DRV_VERSION "1.00.00.34"
22 22
23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ 23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
24 24
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 0780e039b271..8dee1beb9854 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -181,6 +181,7 @@ static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
181}; 181};
182#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN) 182#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
183#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats) 183#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
184#define QLGE_RCV_MAC_ERR_STATS 7
184 185
185static int ql_update_ring_coalescing(struct ql_adapter *qdev) 186static int ql_update_ring_coalescing(struct ql_adapter *qdev)
186{ 187{
@@ -280,6 +281,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
280 iter++; 281 iter++;
281 } 282 }
282 283
284 /* Update receive mac error statistics */
285 iter += QLGE_RCV_MAC_ERR_STATS;
286
283 /* 287 /*
284 * Get Per-priority TX pause frame counter statistics. 288 * Get Per-priority TX pause frame counter statistics.
285 */ 289 */
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index a245dc18d769..449f506d2e8f 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2376,14 +2376,6 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
2376 netdev_features_t features) 2376 netdev_features_t features)
2377{ 2377{
2378 int err; 2378 int err;
2379 /*
2380 * Since there is no support for separate rx/tx vlan accel
2381 * enable/disable make sure tx flag is always in same state as rx.
2382 */
2383 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2384 features |= NETIF_F_HW_VLAN_CTAG_TX;
2385 else
2386 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2387 2379
2388 /* Update the behavior of vlan accel in the adapter */ 2380 /* Update the behavior of vlan accel in the adapter */
2389 err = qlge_update_hw_vlan_features(ndev, features); 2381 err = qlge_update_hw_vlan_features(ndev, features);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index f2a2128165dd..737c1a881f78 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -678,9 +678,6 @@ static void cp_tx (struct cp_private *cp)
678 le32_to_cpu(txd->opts1) & 0xffff, 678 le32_to_cpu(txd->opts1) & 0xffff,
679 PCI_DMA_TODEVICE); 679 PCI_DMA_TODEVICE);
680 680
681 bytes_compl += skb->len;
682 pkts_compl++;
683
684 if (status & LastFrag) { 681 if (status & LastFrag) {
685 if (status & (TxError | TxFIFOUnder)) { 682 if (status & (TxError | TxFIFOUnder)) {
686 netif_dbg(cp, tx_err, cp->dev, 683 netif_dbg(cp, tx_err, cp->dev,
@@ -702,6 +699,8 @@ static void cp_tx (struct cp_private *cp)
702 netif_dbg(cp, tx_done, cp->dev, 699 netif_dbg(cp, tx_done, cp->dev,
703 "tx done, slot %d\n", tx_tail); 700 "tx done, slot %d\n", tx_tail);
704 } 701 }
702 bytes_compl += skb->len;
703 pkts_compl++;
705 dev_kfree_skb_irq(skb); 704 dev_kfree_skb_irq(skb);
706 } 705 }
707 706
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 799387570766..c737f0ea5de7 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -3465,6 +3465,11 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
3465 rtl_writephy(tp, 0x14, 0x9065); 3465 rtl_writephy(tp, 0x14, 0x9065);
3466 rtl_writephy(tp, 0x14, 0x1065); 3466 rtl_writephy(tp, 0x14, 0x1065);
3467 3467
3468 /* Check ALDPS bit, disable it if enabled */
3469 rtl_writephy(tp, 0x1f, 0x0a43);
3470 if (rtl_readphy(tp, 0x10) & 0x0004)
3471 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);
3472
3468 rtl_writephy(tp, 0x1f, 0x0000); 3473 rtl_writephy(tp, 0x1f, 0x0000);
3469} 3474}
3470 3475
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 2e27837ce6a2..fd844b53e385 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -585,7 +585,7 @@ static void efx_start_datapath(struct efx_nic *efx)
585 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + 585 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
586 efx->type->rx_buffer_padding); 586 efx->type->rx_buffer_padding);
587 rx_buf_len = (sizeof(struct efx_rx_page_state) + 587 rx_buf_len = (sizeof(struct efx_rx_page_state) +
588 NET_IP_ALIGN + efx->rx_dma_len); 588 efx->rx_ip_align + efx->rx_dma_len);
589 if (rx_buf_len <= PAGE_SIZE) { 589 if (rx_buf_len <= PAGE_SIZE) {
590 efx->rx_scatter = efx->type->always_rx_scatter; 590 efx->rx_scatter = efx->type->always_rx_scatter;
591 efx->rx_buffer_order = 0; 591 efx->rx_buffer_order = 0;
@@ -645,6 +645,8 @@ static void efx_start_datapath(struct efx_nic *efx)
645 WARN_ON(channel->rx_pkt_n_frags); 645 WARN_ON(channel->rx_pkt_n_frags);
646 } 646 }
647 647
648 efx_ptp_start_datapath(efx);
649
648 if (netif_device_present(efx->net_dev)) 650 if (netif_device_present(efx->net_dev))
649 netif_tx_wake_all_queues(efx->net_dev); 651 netif_tx_wake_all_queues(efx->net_dev);
650} 652}
@@ -659,6 +661,8 @@ static void efx_stop_datapath(struct efx_nic *efx)
659 EFX_ASSERT_RESET_SERIALISED(efx); 661 EFX_ASSERT_RESET_SERIALISED(efx);
660 BUG_ON(efx->port_enabled); 662 BUG_ON(efx->port_enabled);
661 663
664 efx_ptp_stop_datapath(efx);
665
662 /* Stop RX refill */ 666 /* Stop RX refill */
663 efx_for_each_channel(channel, efx) { 667 efx_for_each_channel(channel, efx) {
664 efx_for_each_channel_rx_queue(rx_queue, channel) 668 efx_for_each_channel_rx_queue(rx_queue, channel)
@@ -2540,6 +2544,8 @@ static int efx_init_struct(struct efx_nic *efx,
2540 2544
2541 efx->net_dev = net_dev; 2545 efx->net_dev = net_dev;
2542 efx->rx_prefix_size = efx->type->rx_prefix_size; 2546 efx->rx_prefix_size = efx->type->rx_prefix_size;
2547 efx->rx_ip_align =
2548 NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
2543 efx->rx_packet_hash_offset = 2549 efx->rx_packet_hash_offset =
2544 efx->type->rx_hash_offset - efx->type->rx_prefix_size; 2550 efx->type->rx_hash_offset - efx->type->rx_prefix_size;
2545 spin_lock_init(&efx->stats_lock); 2551 spin_lock_init(&efx->stats_lock);
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 366c8e3e3784..4b0bd8a1514d 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -50,6 +50,7 @@ struct efx_mcdi_async_param {
50static void efx_mcdi_timeout_async(unsigned long context); 50static void efx_mcdi_timeout_async(unsigned long context);
51static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 51static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
52 bool *was_attached_out); 52 bool *was_attached_out);
53static bool efx_mcdi_poll_once(struct efx_nic *efx);
53 54
54static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) 55static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
55{ 56{
@@ -237,6 +238,21 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
237 } 238 }
238} 239}
239 240
241static bool efx_mcdi_poll_once(struct efx_nic *efx)
242{
243 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
244
245 rmb();
246 if (!efx->type->mcdi_poll_response(efx))
247 return false;
248
249 spin_lock_bh(&mcdi->iface_lock);
250 efx_mcdi_read_response_header(efx);
251 spin_unlock_bh(&mcdi->iface_lock);
252
253 return true;
254}
255
240static int efx_mcdi_poll(struct efx_nic *efx) 256static int efx_mcdi_poll(struct efx_nic *efx)
241{ 257{
242 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 258 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
@@ -272,18 +288,13 @@ static int efx_mcdi_poll(struct efx_nic *efx)
272 288
273 time = jiffies; 289 time = jiffies;
274 290
275 rmb(); 291 if (efx_mcdi_poll_once(efx))
276 if (efx->type->mcdi_poll_response(efx))
277 break; 292 break;
278 293
279 if (time_after(time, finish)) 294 if (time_after(time, finish))
280 return -ETIMEDOUT; 295 return -ETIMEDOUT;
281 } 296 }
282 297
283 spin_lock_bh(&mcdi->iface_lock);
284 efx_mcdi_read_response_header(efx);
285 spin_unlock_bh(&mcdi->iface_lock);
286
287 /* Return rc=0 like wait_event_timeout() */ 298 /* Return rc=0 like wait_event_timeout() */
288 return 0; 299 return 0;
289} 300}
@@ -619,6 +630,16 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
619 rc = efx_mcdi_await_completion(efx); 630 rc = efx_mcdi_await_completion(efx);
620 631
621 if (rc != 0) { 632 if (rc != 0) {
633 netif_err(efx, hw, efx->net_dev,
634 "MC command 0x%x inlen %d mode %d timed out\n",
635 cmd, (int)inlen, mcdi->mode);
636
637 if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
638 netif_err(efx, hw, efx->net_dev,
639 "MCDI request was completed without an event\n");
640 rc = 0;
641 }
642
622 /* Close the race with efx_mcdi_ev_cpl() executing just too late 643 /* Close the race with efx_mcdi_ev_cpl() executing just too late
623 * and completing a request we've just cancelled, by ensuring 644 * and completing a request we've just cancelled, by ensuring
624 * that the seqno check therein fails. 645 * that the seqno check therein fails.
@@ -627,11 +648,9 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
627 ++mcdi->seqno; 648 ++mcdi->seqno;
628 ++mcdi->credits; 649 ++mcdi->credits;
629 spin_unlock_bh(&mcdi->iface_lock); 650 spin_unlock_bh(&mcdi->iface_lock);
651 }
630 652
631 netif_err(efx, hw, efx->net_dev, 653 if (rc == 0) {
632 "MC command 0x%x inlen %d mode %d timed out\n",
633 cmd, (int)inlen, mcdi->mode);
634 } else {
635 size_t hdr_len, data_len; 654 size_t hdr_len, data_len;
636 655
637 /* At the very least we need a memory barrier here to ensure 656 /* At the very least we need a memory barrier here to ensure
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 656a3277c2b2..15816cacb548 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -75,6 +75,8 @@ struct efx_mcdi_mon {
75 unsigned long last_update; 75 unsigned long last_update;
76 struct device *device; 76 struct device *device;
77 struct efx_mcdi_mon_attribute *attrs; 77 struct efx_mcdi_mon_attribute *attrs;
78 struct attribute_group group;
79 const struct attribute_group *groups[2];
78 unsigned int n_attrs; 80 unsigned int n_attrs;
79}; 81};
80 82
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index 4cc5d95b2a5a..d72ad4fc3617 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -139,17 +139,10 @@ static int efx_mcdi_mon_update(struct efx_nic *efx)
139 return rc; 139 return rc;
140} 140}
141 141
142static ssize_t efx_mcdi_mon_show_name(struct device *dev,
143 struct device_attribute *attr,
144 char *buf)
145{
146 return sprintf(buf, "%s\n", KBUILD_MODNAME);
147}
148
149static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index, 142static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index,
150 efx_dword_t *entry) 143 efx_dword_t *entry)
151{ 144{
152 struct efx_nic *efx = dev_get_drvdata(dev); 145 struct efx_nic *efx = dev_get_drvdata(dev->parent);
153 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); 146 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
154 int rc; 147 int rc;
155 148
@@ -263,7 +256,7 @@ static ssize_t efx_mcdi_mon_show_label(struct device *dev,
263 efx_mcdi_sensor_type[mon_attr->type].label); 256 efx_mcdi_sensor_type[mon_attr->type].label);
264} 257}
265 258
266static int 259static void
267efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, 260efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
268 ssize_t (*reader)(struct device *, 261 ssize_t (*reader)(struct device *,
269 struct device_attribute *, char *), 262 struct device_attribute *, char *),
@@ -272,7 +265,6 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
272{ 265{
273 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); 266 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
274 struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs]; 267 struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
275 int rc;
276 268
277 strlcpy(attr->name, name, sizeof(attr->name)); 269 strlcpy(attr->name, name, sizeof(attr->name));
278 attr->index = index; 270 attr->index = index;
@@ -286,10 +278,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
286 attr->dev_attr.attr.name = attr->name; 278 attr->dev_attr.attr.name = attr->name;
287 attr->dev_attr.attr.mode = S_IRUGO; 279 attr->dev_attr.attr.mode = S_IRUGO;
288 attr->dev_attr.show = reader; 280 attr->dev_attr.show = reader;
289 rc = device_create_file(&efx->pci_dev->dev, &attr->dev_attr); 281 hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr;
290 if (rc == 0)
291 ++hwmon->n_attrs;
292 return rc;
293} 282}
294 283
295int efx_mcdi_mon_probe(struct efx_nic *efx) 284int efx_mcdi_mon_probe(struct efx_nic *efx)
@@ -338,26 +327,22 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
338 efx_mcdi_mon_update(efx); 327 efx_mcdi_mon_update(efx);
339 328
340 /* Allocate space for the maximum possible number of 329 /* Allocate space for the maximum possible number of
341 * attributes for this set of sensors: name of the driver plus 330 * attributes for this set of sensors:
342 * value, min, max, crit, alarm and label for each sensor. 331 * value, min, max, crit, alarm and label for each sensor.
343 */ 332 */
344 n_attrs = 1 + 6 * n_sensors; 333 n_attrs = 6 * n_sensors;
345 hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL); 334 hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
346 if (!hwmon->attrs) { 335 if (!hwmon->attrs) {
347 rc = -ENOMEM; 336 rc = -ENOMEM;
348 goto fail; 337 goto fail;
349 } 338 }
350 339 hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *),
351 hwmon->device = hwmon_device_register(&efx->pci_dev->dev); 340 GFP_KERNEL);
352 if (IS_ERR(hwmon->device)) { 341 if (!hwmon->group.attrs) {
353 rc = PTR_ERR(hwmon->device); 342 rc = -ENOMEM;
354 goto fail; 343 goto fail;
355 } 344 }
356 345
357 rc = efx_mcdi_mon_add_attr(efx, "name", efx_mcdi_mon_show_name, 0, 0, 0);
358 if (rc)
359 goto fail;
360
361 for (i = 0, j = -1, type = -1; ; i++) { 346 for (i = 0, j = -1, type = -1; ; i++) {
362 enum efx_hwmon_type hwmon_type; 347 enum efx_hwmon_type hwmon_type;
363 const char *hwmon_prefix; 348 const char *hwmon_prefix;
@@ -372,7 +357,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
372 page = type / 32; 357 page = type / 32;
373 j = -1; 358 j = -1;
374 if (page == n_pages) 359 if (page == n_pages)
375 return 0; 360 goto hwmon_register;
376 361
377 MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, 362 MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
378 page); 363 page);
@@ -453,28 +438,22 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
453 if (min1 != max1) { 438 if (min1 != max1) {
454 snprintf(name, sizeof(name), "%s%u_input", 439 snprintf(name, sizeof(name), "%s%u_input",
455 hwmon_prefix, hwmon_index); 440 hwmon_prefix, hwmon_index);
456 rc = efx_mcdi_mon_add_attr( 441 efx_mcdi_mon_add_attr(
457 efx, name, efx_mcdi_mon_show_value, i, type, 0); 442 efx, name, efx_mcdi_mon_show_value, i, type, 0);
458 if (rc)
459 goto fail;
460 443
461 if (hwmon_type != EFX_HWMON_POWER) { 444 if (hwmon_type != EFX_HWMON_POWER) {
462 snprintf(name, sizeof(name), "%s%u_min", 445 snprintf(name, sizeof(name), "%s%u_min",
463 hwmon_prefix, hwmon_index); 446 hwmon_prefix, hwmon_index);
464 rc = efx_mcdi_mon_add_attr( 447 efx_mcdi_mon_add_attr(
465 efx, name, efx_mcdi_mon_show_limit, 448 efx, name, efx_mcdi_mon_show_limit,
466 i, type, min1); 449 i, type, min1);
467 if (rc)
468 goto fail;
469 } 450 }
470 451
471 snprintf(name, sizeof(name), "%s%u_max", 452 snprintf(name, sizeof(name), "%s%u_max",
472 hwmon_prefix, hwmon_index); 453 hwmon_prefix, hwmon_index);
473 rc = efx_mcdi_mon_add_attr( 454 efx_mcdi_mon_add_attr(
474 efx, name, efx_mcdi_mon_show_limit, 455 efx, name, efx_mcdi_mon_show_limit,
475 i, type, max1); 456 i, type, max1);
476 if (rc)
477 goto fail;
478 457
479 if (min2 != max2) { 458 if (min2 != max2) {
480 /* Assume max2 is critical value. 459 /* Assume max2 is critical value.
@@ -482,32 +461,38 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
482 */ 461 */
483 snprintf(name, sizeof(name), "%s%u_crit", 462 snprintf(name, sizeof(name), "%s%u_crit",
484 hwmon_prefix, hwmon_index); 463 hwmon_prefix, hwmon_index);
485 rc = efx_mcdi_mon_add_attr( 464 efx_mcdi_mon_add_attr(
486 efx, name, efx_mcdi_mon_show_limit, 465 efx, name, efx_mcdi_mon_show_limit,
487 i, type, max2); 466 i, type, max2);
488 if (rc)
489 goto fail;
490 } 467 }
491 } 468 }
492 469
493 snprintf(name, sizeof(name), "%s%u_alarm", 470 snprintf(name, sizeof(name), "%s%u_alarm",
494 hwmon_prefix, hwmon_index); 471 hwmon_prefix, hwmon_index);
495 rc = efx_mcdi_mon_add_attr( 472 efx_mcdi_mon_add_attr(
496 efx, name, efx_mcdi_mon_show_alarm, i, type, 0); 473 efx, name, efx_mcdi_mon_show_alarm, i, type, 0);
497 if (rc)
498 goto fail;
499 474
500 if (type < ARRAY_SIZE(efx_mcdi_sensor_type) && 475 if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
501 efx_mcdi_sensor_type[type].label) { 476 efx_mcdi_sensor_type[type].label) {
502 snprintf(name, sizeof(name), "%s%u_label", 477 snprintf(name, sizeof(name), "%s%u_label",
503 hwmon_prefix, hwmon_index); 478 hwmon_prefix, hwmon_index);
504 rc = efx_mcdi_mon_add_attr( 479 efx_mcdi_mon_add_attr(
505 efx, name, efx_mcdi_mon_show_label, i, type, 0); 480 efx, name, efx_mcdi_mon_show_label, i, type, 0);
506 if (rc)
507 goto fail;
508 } 481 }
509 } 482 }
510 483
484hwmon_register:
485 hwmon->groups[0] = &hwmon->group;
486 hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev,
487 KBUILD_MODNAME, NULL,
488 hwmon->groups);
489 if (IS_ERR(hwmon->device)) {
490 rc = PTR_ERR(hwmon->device);
491 goto fail;
492 }
493
494 return 0;
495
511fail: 496fail:
512 efx_mcdi_mon_remove(efx); 497 efx_mcdi_mon_remove(efx);
513 return rc; 498 return rc;
@@ -516,14 +501,11 @@ fail:
516void efx_mcdi_mon_remove(struct efx_nic *efx) 501void efx_mcdi_mon_remove(struct efx_nic *efx)
517{ 502{
518 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); 503 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
519 unsigned int i;
520 504
521 for (i = 0; i < hwmon->n_attrs; i++)
522 device_remove_file(&efx->pci_dev->dev,
523 &hwmon->attrs[i].dev_attr);
524 kfree(hwmon->attrs);
525 if (hwmon->device) 505 if (hwmon->device)
526 hwmon_device_unregister(hwmon->device); 506 hwmon_device_unregister(hwmon->device);
507 kfree(hwmon->attrs);
508 kfree(hwmon->group.attrs);
527 efx_nic_free_buffer(efx, &hwmon->dma_buf); 509 efx_nic_free_buffer(efx, &hwmon->dma_buf);
528} 510}
529 511
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b14a717ac3e8..542a0d252ae0 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -683,6 +683,8 @@ struct vfdi_status;
683 * @n_channels: Number of channels in use 683 * @n_channels: Number of channels in use
684 * @n_rx_channels: Number of channels used for RX (= number of RX queues) 684 * @n_rx_channels: Number of channels used for RX (= number of RX queues)
685 * @n_tx_channels: Number of channels used for TX 685 * @n_tx_channels: Number of channels used for TX
686 * @rx_ip_align: RX DMA address offset to have IP header aligned in
687 * in accordance with NET_IP_ALIGN
686 * @rx_dma_len: Current maximum RX DMA length 688 * @rx_dma_len: Current maximum RX DMA length
687 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 689 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
688 * @rx_buffer_truesize: Amortised allocation size of an RX buffer, 690 * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
@@ -816,6 +818,7 @@ struct efx_nic {
816 unsigned rss_spread; 818 unsigned rss_spread;
817 unsigned tx_channel_offset; 819 unsigned tx_channel_offset;
818 unsigned n_tx_channels; 820 unsigned n_tx_channels;
821 unsigned int rx_ip_align;
819 unsigned int rx_dma_len; 822 unsigned int rx_dma_len;
820 unsigned int rx_buffer_order; 823 unsigned int rx_buffer_order;
821 unsigned int rx_buffer_truesize; 824 unsigned int rx_buffer_truesize;
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 11b6112d9249..91c63ec79c5f 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -560,6 +560,8 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
560bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); 560bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
561int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); 561int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
562void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); 562void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
563void efx_ptp_start_datapath(struct efx_nic *efx);
564void efx_ptp_stop_datapath(struct efx_nic *efx);
563 565
564extern const struct efx_nic_type falcon_a1_nic_type; 566extern const struct efx_nic_type falcon_a1_nic_type;
565extern const struct efx_nic_type falcon_b0_nic_type; 567extern const struct efx_nic_type falcon_b0_nic_type;
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 03acf57df045..3dd39dcfe36b 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -220,6 +220,7 @@ struct efx_ptp_timeset {
220 * @evt_list: List of MC receive events awaiting packets 220 * @evt_list: List of MC receive events awaiting packets
221 * @evt_free_list: List of free events 221 * @evt_free_list: List of free events
222 * @evt_lock: Lock for manipulating evt_list and evt_free_list 222 * @evt_lock: Lock for manipulating evt_list and evt_free_list
223 * @evt_overflow: Boolean indicating that event list has overflowed
223 * @rx_evts: Instantiated events (on evt_list and evt_free_list) 224 * @rx_evts: Instantiated events (on evt_list and evt_free_list)
224 * @workwq: Work queue for processing pending PTP operations 225 * @workwq: Work queue for processing pending PTP operations
225 * @work: Work task 226 * @work: Work task
@@ -270,6 +271,7 @@ struct efx_ptp_data {
270 struct list_head evt_list; 271 struct list_head evt_list;
271 struct list_head evt_free_list; 272 struct list_head evt_free_list;
272 spinlock_t evt_lock; 273 spinlock_t evt_lock;
274 bool evt_overflow;
273 struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS]; 275 struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
274 struct workqueue_struct *workwq; 276 struct workqueue_struct *workwq;
275 struct work_struct work; 277 struct work_struct work;
@@ -635,6 +637,11 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
635 } 637 }
636 } 638 }
637 } 639 }
640 /* If the event overflow flag is set and the event list is now empty
641 * clear the flag to re-enable the overflow warning message.
642 */
643 if (ptp->evt_overflow && list_empty(&ptp->evt_list))
644 ptp->evt_overflow = false;
638 spin_unlock_bh(&ptp->evt_lock); 645 spin_unlock_bh(&ptp->evt_lock);
639} 646}
640 647
@@ -676,6 +683,11 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
676 break; 683 break;
677 } 684 }
678 } 685 }
686 /* If the event overflow flag is set and the event list is now empty
687 * clear the flag to re-enable the overflow warning message.
688 */
689 if (ptp->evt_overflow && list_empty(&ptp->evt_list))
690 ptp->evt_overflow = false;
679 spin_unlock_bh(&ptp->evt_lock); 691 spin_unlock_bh(&ptp->evt_lock);
680 692
681 return rc; 693 return rc;
@@ -705,8 +717,9 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
705 __skb_queue_tail(q, skb); 717 __skb_queue_tail(q, skb);
706 } else if (time_after(jiffies, match->expiry)) { 718 } else if (time_after(jiffies, match->expiry)) {
707 match->state = PTP_PACKET_STATE_TIMED_OUT; 719 match->state = PTP_PACKET_STATE_TIMED_OUT;
708 netif_warn(efx, rx_err, efx->net_dev, 720 if (net_ratelimit())
709 "PTP packet - no timestamp seen\n"); 721 netif_warn(efx, rx_err, efx->net_dev,
722 "PTP packet - no timestamp seen\n");
710 __skb_queue_tail(q, skb); 723 __skb_queue_tail(q, skb);
711 } else { 724 } else {
712 /* Replace unprocessed entry and stop */ 725 /* Replace unprocessed entry and stop */
@@ -788,9 +801,14 @@ fail:
788static int efx_ptp_stop(struct efx_nic *efx) 801static int efx_ptp_stop(struct efx_nic *efx)
789{ 802{
790 struct efx_ptp_data *ptp = efx->ptp_data; 803 struct efx_ptp_data *ptp = efx->ptp_data;
791 int rc = efx_ptp_disable(efx);
792 struct list_head *cursor; 804 struct list_head *cursor;
793 struct list_head *next; 805 struct list_head *next;
806 int rc;
807
808 if (ptp == NULL)
809 return 0;
810
811 rc = efx_ptp_disable(efx);
794 812
795 if (ptp->rxfilter_installed) { 813 if (ptp->rxfilter_installed) {
796 efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, 814 efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
@@ -809,11 +827,19 @@ static int efx_ptp_stop(struct efx_nic *efx)
809 list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { 827 list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
810 list_move(cursor, &efx->ptp_data->evt_free_list); 828 list_move(cursor, &efx->ptp_data->evt_free_list);
811 } 829 }
830 ptp->evt_overflow = false;
812 spin_unlock_bh(&efx->ptp_data->evt_lock); 831 spin_unlock_bh(&efx->ptp_data->evt_lock);
813 832
814 return rc; 833 return rc;
815} 834}
816 835
836static int efx_ptp_restart(struct efx_nic *efx)
837{
838 if (efx->ptp_data && efx->ptp_data->enabled)
839 return efx_ptp_start(efx);
840 return 0;
841}
842
817static void efx_ptp_pps_worker(struct work_struct *work) 843static void efx_ptp_pps_worker(struct work_struct *work)
818{ 844{
819 struct efx_ptp_data *ptp = 845 struct efx_ptp_data *ptp =
@@ -901,6 +927,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
901 spin_lock_init(&ptp->evt_lock); 927 spin_lock_init(&ptp->evt_lock);
902 for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++) 928 for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
903 list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list); 929 list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
930 ptp->evt_overflow = false;
904 931
905 ptp->phc_clock_info.owner = THIS_MODULE; 932 ptp->phc_clock_info.owner = THIS_MODULE;
906 snprintf(ptp->phc_clock_info.name, 933 snprintf(ptp->phc_clock_info.name,
@@ -989,7 +1016,11 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
989 skb->len >= PTP_MIN_LENGTH && 1016 skb->len >= PTP_MIN_LENGTH &&
990 skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM && 1017 skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM &&
991 likely(skb->protocol == htons(ETH_P_IP)) && 1018 likely(skb->protocol == htons(ETH_P_IP)) &&
1019 skb_transport_header_was_set(skb) &&
1020 skb_network_header_len(skb) >= sizeof(struct iphdr) &&
992 ip_hdr(skb)->protocol == IPPROTO_UDP && 1021 ip_hdr(skb)->protocol == IPPROTO_UDP &&
1022 skb_headlen(skb) >=
1023 skb_transport_offset(skb) + sizeof(struct udphdr) &&
993 udp_hdr(skb)->dest == htons(PTP_EVENT_PORT); 1024 udp_hdr(skb)->dest == htons(PTP_EVENT_PORT);
994} 1025}
995 1026
@@ -1106,7 +1137,7 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
1106{ 1137{
1107 if ((enable_wanted != efx->ptp_data->enabled) || 1138 if ((enable_wanted != efx->ptp_data->enabled) ||
1108 (enable_wanted && (efx->ptp_data->mode != new_mode))) { 1139 (enable_wanted && (efx->ptp_data->mode != new_mode))) {
1109 int rc; 1140 int rc = 0;
1110 1141
1111 if (enable_wanted) { 1142 if (enable_wanted) {
1112 /* Change of mode requires disable */ 1143 /* Change of mode requires disable */
@@ -1123,7 +1154,8 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
1123 * succeed. 1154 * succeed.
1124 */ 1155 */
1125 efx->ptp_data->mode = new_mode; 1156 efx->ptp_data->mode = new_mode;
1126 rc = efx_ptp_start(efx); 1157 if (netif_running(efx->net_dev))
1158 rc = efx_ptp_start(efx);
1127 if (rc == 0) { 1159 if (rc == 0) {
1128 rc = efx_ptp_synchronize(efx, 1160 rc = efx_ptp_synchronize(efx,
1129 PTP_SYNC_ATTEMPTS * 2); 1161 PTP_SYNC_ATTEMPTS * 2);
@@ -1295,8 +1327,13 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
1295 list_add_tail(&evt->link, &ptp->evt_list); 1327 list_add_tail(&evt->link, &ptp->evt_list);
1296 1328
1297 queue_work(ptp->workwq, &ptp->work); 1329 queue_work(ptp->workwq, &ptp->work);
1298 } else { 1330 } else if (!ptp->evt_overflow) {
1299 netif_err(efx, rx_err, efx->net_dev, "No free PTP event"); 1331 /* Log a warning message and set the event overflow flag.
1332 * The message won't be logged again until the event queue
1333 * becomes empty.
1334 */
1335 netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
1336 ptp->evt_overflow = true;
1300 } 1337 }
1301 spin_unlock_bh(&ptp->evt_lock); 1338 spin_unlock_bh(&ptp->evt_lock);
1302} 1339}
@@ -1389,7 +1426,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
1389 if (rc != 0) 1426 if (rc != 0)
1390 return rc; 1427 return rc;
1391 1428
1392 ptp_data->current_adjfreq = delta; 1429 ptp_data->current_adjfreq = adjustment_ns;
1393 return 0; 1430 return 0;
1394} 1431}
1395 1432
@@ -1404,7 +1441,7 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
1404 1441
1405 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); 1442 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
1406 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); 1443 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
1407 MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0); 1444 MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq);
1408 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec); 1445 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
1409 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec); 1446 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
1410 return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), 1447 return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
@@ -1491,3 +1528,14 @@ void efx_ptp_probe(struct efx_nic *efx)
1491 efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] = 1528 efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] =
1492 &efx_ptp_channel_type; 1529 &efx_ptp_channel_type;
1493} 1530}
1531
1532void efx_ptp_start_datapath(struct efx_nic *efx)
1533{
1534 if (efx_ptp_restart(efx))
1535 netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n");
1536}
1537
1538void efx_ptp_stop_datapath(struct efx_nic *efx)
1539{
1540 efx_ptp_stop(efx);
1541}
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 8f09e686fc23..42488df1f4ec 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -94,7 +94,7 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
94 94
95void efx_rx_config_page_split(struct efx_nic *efx) 95void efx_rx_config_page_split(struct efx_nic *efx)
96{ 96{
97 efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN, 97 efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
98 EFX_RX_BUF_ALIGNMENT); 98 EFX_RX_BUF_ALIGNMENT);
99 efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : 99 efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
100 ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / 100 ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
@@ -189,9 +189,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
189 do { 189 do {
190 index = rx_queue->added_count & rx_queue->ptr_mask; 190 index = rx_queue->added_count & rx_queue->ptr_mask;
191 rx_buf = efx_rx_buffer(rx_queue, index); 191 rx_buf = efx_rx_buffer(rx_queue, index);
192 rx_buf->dma_addr = dma_addr + NET_IP_ALIGN; 192 rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
193 rx_buf->page = page; 193 rx_buf->page = page;
194 rx_buf->page_offset = page_offset + NET_IP_ALIGN; 194 rx_buf->page_offset = page_offset + efx->rx_ip_align;
195 rx_buf->len = efx->rx_dma_len; 195 rx_buf->len = efx->rx_dma_len;
196 rx_buf->flags = 0; 196 rx_buf->flags = 0;
197 ++rx_queue->added_count; 197 ++rx_queue->added_count;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 0c9b5d94154f..8bf29eb4a5a0 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -82,6 +82,7 @@ static const char version[] =
82#include <linux/mii.h> 82#include <linux/mii.h>
83#include <linux/workqueue.h> 83#include <linux/workqueue.h>
84#include <linux/of.h> 84#include <linux/of.h>
85#include <linux/of_device.h>
85 86
86#include <linux/netdevice.h> 87#include <linux/netdevice.h>
87#include <linux/etherdevice.h> 88#include <linux/etherdevice.h>
@@ -2184,6 +2185,15 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
2184 } 2185 }
2185} 2186}
2186 2187
2188#if IS_BUILTIN(CONFIG_OF)
2189static const struct of_device_id smc91x_match[] = {
2190 { .compatible = "smsc,lan91c94", },
2191 { .compatible = "smsc,lan91c111", },
2192 {},
2193};
2194MODULE_DEVICE_TABLE(of, smc91x_match);
2195#endif
2196
2187/* 2197/*
2188 * smc_init(void) 2198 * smc_init(void)
2189 * Input parameters: 2199 * Input parameters:
@@ -2198,6 +2208,7 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
2198static int smc_drv_probe(struct platform_device *pdev) 2208static int smc_drv_probe(struct platform_device *pdev)
2199{ 2209{
2200 struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev); 2210 struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev);
2211 const struct of_device_id *match = NULL;
2201 struct smc_local *lp; 2212 struct smc_local *lp;
2202 struct net_device *ndev; 2213 struct net_device *ndev;
2203 struct resource *res, *ires; 2214 struct resource *res, *ires;
@@ -2217,11 +2228,34 @@ static int smc_drv_probe(struct platform_device *pdev)
2217 */ 2228 */
2218 2229
2219 lp = netdev_priv(ndev); 2230 lp = netdev_priv(ndev);
2231 lp->cfg.flags = 0;
2220 2232
2221 if (pd) { 2233 if (pd) {
2222 memcpy(&lp->cfg, pd, sizeof(lp->cfg)); 2234 memcpy(&lp->cfg, pd, sizeof(lp->cfg));
2223 lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags); 2235 lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
2224 } else { 2236 }
2237
2238#if IS_BUILTIN(CONFIG_OF)
2239 match = of_match_device(of_match_ptr(smc91x_match), &pdev->dev);
2240 if (match) {
2241 struct device_node *np = pdev->dev.of_node;
2242 u32 val;
2243
2244 /* Combination of IO widths supported, default to 16-bit */
2245 if (!of_property_read_u32(np, "reg-io-width", &val)) {
2246 if (val & 1)
2247 lp->cfg.flags |= SMC91X_USE_8BIT;
2248 if ((val == 0) || (val & 2))
2249 lp->cfg.flags |= SMC91X_USE_16BIT;
2250 if (val & 4)
2251 lp->cfg.flags |= SMC91X_USE_32BIT;
2252 } else {
2253 lp->cfg.flags |= SMC91X_USE_16BIT;
2254 }
2255 }
2256#endif
2257
2258 if (!pd && !match) {
2225 lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0; 2259 lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0;
2226 lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0; 2260 lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0;
2227 lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0; 2261 lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0;
@@ -2370,15 +2404,6 @@ static int smc_drv_resume(struct device *dev)
2370 return 0; 2404 return 0;
2371} 2405}
2372 2406
2373#ifdef CONFIG_OF
2374static const struct of_device_id smc91x_match[] = {
2375 { .compatible = "smsc,lan91c94", },
2376 { .compatible = "smsc,lan91c111", },
2377 {},
2378};
2379MODULE_DEVICE_TABLE(of, smc91x_match);
2380#endif
2381
2382static struct dev_pm_ops smc_drv_pm_ops = { 2407static struct dev_pm_ops smc_drv_pm_ops = {
2383 .suspend = smc_drv_suspend, 2408 .suspend = smc_drv_suspend,
2384 .resume = smc_drv_resume, 2409 .resume = smc_drv_resume,
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index c9d4c872e81d..749654b976bc 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -46,7 +46,8 @@
46 defined(CONFIG_MACH_LITTLETON) ||\ 46 defined(CONFIG_MACH_LITTLETON) ||\
47 defined(CONFIG_MACH_ZYLONITE2) ||\ 47 defined(CONFIG_MACH_ZYLONITE2) ||\
48 defined(CONFIG_ARCH_VIPER) ||\ 48 defined(CONFIG_ARCH_VIPER) ||\
49 defined(CONFIG_MACH_STARGATE2) 49 defined(CONFIG_MACH_STARGATE2) ||\
50 defined(CONFIG_ARCH_VERSATILE)
50 51
51#include <asm/mach-types.h> 52#include <asm/mach-types.h>
52 53
@@ -154,6 +155,8 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
154#define SMC_outl(v, a, r) writel(v, (a) + (r)) 155#define SMC_outl(v, a, r) writel(v, (a) + (r))
155#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) 156#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
156#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) 157#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
158#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
159#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
157#define SMC_IRQ_FLAGS (-1) /* from resource */ 160#define SMC_IRQ_FLAGS (-1) /* from resource */
158 161
159/* We actually can't write halfwords properly if not word aligned */ 162/* We actually can't write halfwords properly if not word aligned */
@@ -206,23 +209,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
206#define RPC_LSA_DEFAULT RPC_LED_TX_RX 209#define RPC_LSA_DEFAULT RPC_LED_TX_RX
207#define RPC_LSB_DEFAULT RPC_LED_100_10 210#define RPC_LSB_DEFAULT RPC_LED_100_10
208 211
209#elif defined(CONFIG_ARCH_VERSATILE)
210
211#define SMC_CAN_USE_8BIT 1
212#define SMC_CAN_USE_16BIT 1
213#define SMC_CAN_USE_32BIT 1
214#define SMC_NOWAIT 1
215
216#define SMC_inb(a, r) readb((a) + (r))
217#define SMC_inw(a, r) readw((a) + (r))
218#define SMC_inl(a, r) readl((a) + (r))
219#define SMC_outb(v, a, r) writeb(v, (a) + (r))
220#define SMC_outw(v, a, r) writew(v, (a) + (r))
221#define SMC_outl(v, a, r) writel(v, (a) + (r))
222#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
223#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
224#define SMC_IRQ_FLAGS (-1) /* from resource */
225
226#elif defined(CONFIG_MN10300) 212#elif defined(CONFIG_MN10300)
227 213
228/* 214/*
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8d4ccd35a016..8a7a23a84ac5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -435,16 +435,9 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
435 if (config.flags) 435 if (config.flags)
436 return -EINVAL; 436 return -EINVAL;
437 437
438 switch (config.tx_type) { 438 if (config.tx_type != HWTSTAMP_TX_OFF &&
439 case HWTSTAMP_TX_OFF: 439 config.tx_type != HWTSTAMP_TX_ON)
440 priv->hwts_tx_en = 0;
441 break;
442 case HWTSTAMP_TX_ON:
443 priv->hwts_tx_en = 1;
444 break;
445 default:
446 return -ERANGE; 440 return -ERANGE;
447 }
448 441
449 if (priv->adv_ts) { 442 if (priv->adv_ts) {
450 switch (config.rx_filter) { 443 switch (config.rx_filter) {
@@ -576,6 +569,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
576 } 569 }
577 } 570 }
578 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); 571 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
572 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
579 573
580 if (!priv->hwts_tx_en && !priv->hwts_rx_en) 574 if (!priv->hwts_tx_en && !priv->hwts_rx_en)
581 priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0); 575 priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index dd0dd6279b4e..4f1d2549130e 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2019,7 +2019,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2019 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO 2019 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
2020 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 2020 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2021 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM 2021 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM
2022 /*| NETIF_F_FRAGLIST */
2023 ; 2022 ;
2024 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 2023 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2025 NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX; 2024 NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 90d41d26ec6d..5120d9ce1dd4 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -967,14 +967,19 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
967 priv->host_port, ALE_VLAN, slave->port_vlan); 967 priv->host_port, ALE_VLAN, slave->port_vlan);
968} 968}
969 969
970static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) 970static void soft_reset_slave(struct cpsw_slave *slave)
971{ 971{
972 char name[32]; 972 char name[32];
973 u32 slave_port;
974
975 sprintf(name, "slave-%d", slave->slave_num);
976 973
974 snprintf(name, sizeof(name), "slave-%d", slave->slave_num);
977 soft_reset(name, &slave->sliver->soft_reset); 975 soft_reset(name, &slave->sliver->soft_reset);
976}
977
978static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
979{
980 u32 slave_port;
981
982 soft_reset_slave(slave);
978 983
979 /* setup priority mapping */ 984 /* setup priority mapping */
980 __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map); 985 __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
@@ -1146,6 +1151,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
1146 * receive descs 1151 * receive descs
1147 */ 1152 */
1148 cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); 1153 cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
1154
1155 if (cpts_register(&priv->pdev->dev, priv->cpts,
1156 priv->data.cpts_clock_mult,
1157 priv->data.cpts_clock_shift))
1158 dev_err(priv->dev, "error registering cpts device\n");
1159
1149 } 1160 }
1150 1161
1151 /* Enable Interrupt pacing if configured */ 1162 /* Enable Interrupt pacing if configured */
@@ -1192,6 +1203,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
1192 netif_carrier_off(priv->ndev); 1203 netif_carrier_off(priv->ndev);
1193 1204
1194 if (cpsw_common_res_usage_state(priv) <= 1) { 1205 if (cpsw_common_res_usage_state(priv) <= 1) {
1206 cpts_unregister(priv->cpts);
1195 cpsw_intr_disable(priv); 1207 cpsw_intr_disable(priv);
1196 cpdma_ctlr_int_ctrl(priv->dma, false); 1208 cpdma_ctlr_int_ctrl(priv->dma, false);
1197 cpdma_ctlr_stop(priv->dma); 1209 cpdma_ctlr_stop(priv->dma);
@@ -1323,6 +1335,10 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
1323 struct cpts *cpts = priv->cpts; 1335 struct cpts *cpts = priv->cpts;
1324 struct hwtstamp_config cfg; 1336 struct hwtstamp_config cfg;
1325 1337
1338 if (priv->version != CPSW_VERSION_1 &&
1339 priv->version != CPSW_VERSION_2)
1340 return -EOPNOTSUPP;
1341
1326 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) 1342 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1327 return -EFAULT; 1343 return -EFAULT;
1328 1344
@@ -1330,16 +1346,8 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
1330 if (cfg.flags) 1346 if (cfg.flags)
1331 return -EINVAL; 1347 return -EINVAL;
1332 1348
1333 switch (cfg.tx_type) { 1349 if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
1334 case HWTSTAMP_TX_OFF:
1335 cpts->tx_enable = 0;
1336 break;
1337 case HWTSTAMP_TX_ON:
1338 cpts->tx_enable = 1;
1339 break;
1340 default:
1341 return -ERANGE; 1350 return -ERANGE;
1342 }
1343 1351
1344 switch (cfg.rx_filter) { 1352 switch (cfg.rx_filter) {
1345 case HWTSTAMP_FILTER_NONE: 1353 case HWTSTAMP_FILTER_NONE:
@@ -1366,6 +1374,8 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
1366 return -ERANGE; 1374 return -ERANGE;
1367 } 1375 }
1368 1376
1377 cpts->tx_enable = cfg.tx_type == HWTSTAMP_TX_ON;
1378
1369 switch (priv->version) { 1379 switch (priv->version) {
1370 case CPSW_VERSION_1: 1380 case CPSW_VERSION_1:
1371 cpsw_hwtstamp_v1(priv); 1381 cpsw_hwtstamp_v1(priv);
@@ -1374,7 +1384,7 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
1374 cpsw_hwtstamp_v2(priv); 1384 cpsw_hwtstamp_v2(priv);
1375 break; 1385 break;
1376 default: 1386 default:
1377 return -ENOTSUPP; 1387 WARN_ON(1);
1378 } 1388 }
1379 1389
1380 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; 1390 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
@@ -1813,6 +1823,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1813 } 1823 }
1814 1824
1815 i++; 1825 i++;
1826 if (i == data->slaves)
1827 break;
1816 } 1828 }
1817 1829
1818 return 0; 1830 return 0;
@@ -1980,9 +1992,15 @@ static int cpsw_probe(struct platform_device *pdev)
1980 goto clean_runtime_disable_ret; 1992 goto clean_runtime_disable_ret;
1981 } 1993 }
1982 priv->regs = ss_regs; 1994 priv->regs = ss_regs;
1983 priv->version = __raw_readl(&priv->regs->id_ver);
1984 priv->host_port = HOST_PORT_NUM; 1995 priv->host_port = HOST_PORT_NUM;
1985 1996
1997 /* Need to enable clocks with runtime PM api to access module
1998 * registers
1999 */
2000 pm_runtime_get_sync(&pdev->dev);
2001 priv->version = readl(&priv->regs->id_ver);
2002 pm_runtime_put_sync(&pdev->dev);
2003
1986 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2004 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1987 priv->wr_regs = devm_ioremap_resource(&pdev->dev, res); 2005 priv->wr_regs = devm_ioremap_resource(&pdev->dev, res);
1988 if (IS_ERR(priv->wr_regs)) { 2006 if (IS_ERR(priv->wr_regs)) {
@@ -2152,8 +2170,6 @@ static int cpsw_remove(struct platform_device *pdev)
2152 unregister_netdev(cpsw_get_slave_ndev(priv, 1)); 2170 unregister_netdev(cpsw_get_slave_ndev(priv, 1));
2153 unregister_netdev(ndev); 2171 unregister_netdev(ndev);
2154 2172
2155 cpts_unregister(priv->cpts);
2156
2157 cpsw_ale_destroy(priv->ale); 2173 cpsw_ale_destroy(priv->ale);
2158 cpdma_chan_destroy(priv->txch); 2174 cpdma_chan_destroy(priv->txch);
2159 cpdma_chan_destroy(priv->rxch); 2175 cpdma_chan_destroy(priv->rxch);
@@ -2173,8 +2189,9 @@ static int cpsw_suspend(struct device *dev)
2173 2189
2174 if (netif_running(ndev)) 2190 if (netif_running(ndev))
2175 cpsw_ndo_stop(ndev); 2191 cpsw_ndo_stop(ndev);
2176 soft_reset("sliver 0", &priv->slaves[0].sliver->soft_reset); 2192
2177 soft_reset("sliver 1", &priv->slaves[1].sliver->soft_reset); 2193 for_each_slave(priv, soft_reset_slave);
2194
2178 pm_runtime_put_sync(&pdev->dev); 2195 pm_runtime_put_sync(&pdev->dev);
2179 2196
2180 /* Select sleep pin state */ 2197 /* Select sleep pin state */
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 41ba974bf37c..cd9b164a0434 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -61,6 +61,7 @@
61#include <linux/davinci_emac.h> 61#include <linux/davinci_emac.h>
62#include <linux/of.h> 62#include <linux/of.h>
63#include <linux/of_address.h> 63#include <linux/of_address.h>
64#include <linux/of_device.h>
64#include <linux/of_irq.h> 65#include <linux/of_irq.h>
65#include <linux/of_net.h> 66#include <linux/of_net.h>
66 67
@@ -1752,10 +1753,14 @@ static const struct net_device_ops emac_netdev_ops = {
1752#endif 1753#endif
1753}; 1754};
1754 1755
1756static const struct of_device_id davinci_emac_of_match[];
1757
1755static struct emac_platform_data * 1758static struct emac_platform_data *
1756davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) 1759davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
1757{ 1760{
1758 struct device_node *np; 1761 struct device_node *np;
1762 const struct of_device_id *match;
1763 const struct emac_platform_data *auxdata;
1759 struct emac_platform_data *pdata = NULL; 1764 struct emac_platform_data *pdata = NULL;
1760 const u8 *mac_addr; 1765 const u8 *mac_addr;
1761 1766
@@ -1793,7 +1798,20 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
1793 1798
1794 priv->phy_node = of_parse_phandle(np, "phy-handle", 0); 1799 priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
1795 if (!priv->phy_node) 1800 if (!priv->phy_node)
1796 pdata->phy_id = ""; 1801 pdata->phy_id = NULL;
1802
1803 auxdata = pdev->dev.platform_data;
1804 if (auxdata) {
1805 pdata->interrupt_enable = auxdata->interrupt_enable;
1806 pdata->interrupt_disable = auxdata->interrupt_disable;
1807 }
1808
1809 match = of_match_device(davinci_emac_of_match, &pdev->dev);
1810 if (match && match->data) {
1811 auxdata = match->data;
1812 pdata->version = auxdata->version;
1813 pdata->hw_ram_addr = auxdata->hw_ram_addr;
1814 }
1797 1815
1798 pdev->dev.platform_data = pdata; 1816 pdev->dev.platform_data = pdata;
1799 1817
@@ -2020,8 +2038,14 @@ static const struct dev_pm_ops davinci_emac_pm_ops = {
2020}; 2038};
2021 2039
2022#if IS_ENABLED(CONFIG_OF) 2040#if IS_ENABLED(CONFIG_OF)
2041static const struct emac_platform_data am3517_emac_data = {
2042 .version = EMAC_VERSION_2,
2043 .hw_ram_addr = 0x01e20000,
2044};
2045
2023static const struct of_device_id davinci_emac_of_match[] = { 2046static const struct of_device_id davinci_emac_of_match[] = {
2024 {.compatible = "ti,davinci-dm6467-emac", }, 2047 {.compatible = "ti,davinci-dm6467-emac", },
2048 {.compatible = "ti,am3517-emac", .data = &am3517_emac_data, },
2025 {}, 2049 {},
2026}; 2050};
2027MODULE_DEVICE_TABLE(of, davinci_emac_of_match); 2051MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index d022bf936572..ad61d26a44f3 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2172,16 +2172,13 @@ static int velocity_poll(struct napi_struct *napi, int budget)
2172 unsigned int rx_done; 2172 unsigned int rx_done;
2173 unsigned long flags; 2173 unsigned long flags;
2174 2174
2175 spin_lock_irqsave(&vptr->lock, flags);
2176 /* 2175 /*
2177 * Do rx and tx twice for performance (taken from the VIA 2176 * Do rx and tx twice for performance (taken from the VIA
2178 * out-of-tree driver). 2177 * out-of-tree driver).
2179 */ 2178 */
2180 rx_done = velocity_rx_srv(vptr, budget / 2); 2179 rx_done = velocity_rx_srv(vptr, budget);
2181 velocity_tx_srv(vptr); 2180 spin_lock_irqsave(&vptr->lock, flags);
2182 rx_done += velocity_rx_srv(vptr, budget - rx_done);
2183 velocity_tx_srv(vptr); 2181 velocity_tx_srv(vptr);
2184
2185 /* If budget not fully consumed, exit the polling mode */ 2182 /* If budget not fully consumed, exit the polling mode */
2186 if (rx_done < budget) { 2183 if (rx_done < budget) {
2187 napi_complete(napi); 2184 napi_complete(napi);
@@ -2342,6 +2339,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2342 if (ret < 0) 2339 if (ret < 0)
2343 goto out_free_tmp_vptr_1; 2340 goto out_free_tmp_vptr_1;
2344 2341
2342 napi_disable(&vptr->napi);
2343
2345 spin_lock_irqsave(&vptr->lock, flags); 2344 spin_lock_irqsave(&vptr->lock, flags);
2346 2345
2347 netif_stop_queue(dev); 2346 netif_stop_queue(dev);
@@ -2362,6 +2361,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2362 2361
2363 velocity_give_many_rx_descs(vptr); 2362 velocity_give_many_rx_descs(vptr);
2364 2363
2364 napi_enable(&vptr->napi);
2365
2365 mac_enable_int(vptr->mac_regs); 2366 mac_enable_int(vptr->mac_regs);
2366 netif_start_queue(dev); 2367 netif_start_queue(dev);
2367 2368
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 1f2364126323..2166e879a096 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1017,7 +1017,7 @@ static int temac_of_probe(struct platform_device *op)
1017 platform_set_drvdata(op, ndev); 1017 platform_set_drvdata(op, ndev);
1018 SET_NETDEV_DEV(ndev, &op->dev); 1018 SET_NETDEV_DEV(ndev, &op->dev);
1019 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ 1019 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
1020 ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; 1020 ndev->features = NETIF_F_SG;
1021 ndev->netdev_ops = &temac_netdev_ops; 1021 ndev->netdev_ops = &temac_netdev_ops;
1022 ndev->ethtool_ops = &temac_ethtool_ops; 1022 ndev->ethtool_ops = &temac_ethtool_ops;
1023#if 0 1023#if 0
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index b2ff038d6d20..f9293da19e26 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1486,7 +1486,7 @@ static int axienet_of_probe(struct platform_device *op)
1486 1486
1487 SET_NETDEV_DEV(ndev, &op->dev); 1487 SET_NETDEV_DEV(ndev, &op->dev);
1488 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ 1488 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
1489 ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; 1489 ndev->features = NETIF_F_SG;
1490 ndev->netdev_ops = &axienet_netdev_ops; 1490 ndev->netdev_ops = &axienet_netdev_ops;
1491 ndev->ethtool_ops = &axienet_ethtool_ops; 1491 ndev->ethtool_ops = &axienet_ethtool_ops;
1492 1492
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 74234a51c851..fefb8cd5eb65 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -163,26 +163,9 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
163 __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, 163 __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
164 drvdata->base_addr + XEL_TSR_OFFSET); 164 drvdata->base_addr + XEL_TSR_OFFSET);
165 165
166 /* Enable the Tx interrupts for the second Buffer if
167 * configured in HW */
168 if (drvdata->tx_ping_pong != 0) {
169 reg_data = __raw_readl(drvdata->base_addr +
170 XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
171 __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
172 drvdata->base_addr + XEL_BUFFER_OFFSET +
173 XEL_TSR_OFFSET);
174 }
175
176 /* Enable the Rx interrupts for the first buffer */ 166 /* Enable the Rx interrupts for the first buffer */
177 __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); 167 __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
178 168
179 /* Enable the Rx interrupts for the second Buffer if
180 * configured in HW */
181 if (drvdata->rx_ping_pong != 0) {
182 __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr +
183 XEL_BUFFER_OFFSET + XEL_RSR_OFFSET);
184 }
185
186 /* Enable the Global Interrupt Enable */ 169 /* Enable the Global Interrupt Enable */
187 __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); 170 __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
188} 171}
@@ -206,31 +189,10 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
206 __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), 189 __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
207 drvdata->base_addr + XEL_TSR_OFFSET); 190 drvdata->base_addr + XEL_TSR_OFFSET);
208 191
209 /* Disable the Tx interrupts for the second Buffer
210 * if configured in HW */
211 if (drvdata->tx_ping_pong != 0) {
212 reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET +
213 XEL_TSR_OFFSET);
214 __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
215 drvdata->base_addr + XEL_BUFFER_OFFSET +
216 XEL_TSR_OFFSET);
217 }
218
219 /* Disable the Rx interrupts for the first buffer */ 192 /* Disable the Rx interrupts for the first buffer */
220 reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET); 193 reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
221 __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), 194 __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
222 drvdata->base_addr + XEL_RSR_OFFSET); 195 drvdata->base_addr + XEL_RSR_OFFSET);
223
224 /* Disable the Rx interrupts for the second buffer
225 * if configured in HW */
226 if (drvdata->rx_ping_pong != 0) {
227
228 reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET +
229 XEL_RSR_OFFSET);
230 __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
231 drvdata->base_addr + XEL_BUFFER_OFFSET +
232 XEL_RSR_OFFSET);
233 }
234} 196}
235 197
236/** 198/**
@@ -258,6 +220,13 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
258 *to_u16_ptr++ = *from_u16_ptr++; 220 *to_u16_ptr++ = *from_u16_ptr++;
259 *to_u16_ptr++ = *from_u16_ptr++; 221 *to_u16_ptr++ = *from_u16_ptr++;
260 222
223 /* This barrier resolves occasional issues seen around
224 * cases where the data is not properly flushed out
225 * from the processor store buffers to the destination
226 * memory locations.
227 */
228 wmb();
229
261 /* Output a word */ 230 /* Output a word */
262 *to_u32_ptr++ = align_buffer; 231 *to_u32_ptr++ = align_buffer;
263 } 232 }
@@ -273,6 +242,12 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
273 for (; length > 0; length--) 242 for (; length > 0; length--)
274 *to_u8_ptr++ = *from_u8_ptr++; 243 *to_u8_ptr++ = *from_u8_ptr++;
275 244
245 /* This barrier resolves occasional issues seen around
246 * cases where the data is not properly flushed out
247 * from the processor store buffers to the destination
248 * memory locations.
249 */
250 wmb();
276 *to_u32_ptr = align_buffer; 251 *to_u32_ptr = align_buffer;
277 } 252 }
278} 253}
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index e78802e75ea6..bcc224a83734 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -389,16 +389,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
389 ch = PORT2CHANNEL(port); 389 ch = PORT2CHANNEL(port);
390 regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; 390 regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;
391 391
392 switch (cfg.tx_type) { 392 if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
393 case HWTSTAMP_TX_OFF:
394 port->hwts_tx_en = 0;
395 break;
396 case HWTSTAMP_TX_ON:
397 port->hwts_tx_en = 1;
398 break;
399 default:
400 return -ERANGE; 393 return -ERANGE;
401 }
402 394
403 switch (cfg.rx_filter) { 395 switch (cfg.rx_filter) {
404 case HWTSTAMP_FILTER_NONE: 396 case HWTSTAMP_FILTER_NONE:
@@ -416,6 +408,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
416 return -ERANGE; 408 return -ERANGE;
417 } 409 }
418 410
411 port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;
412
419 /* Clear out any old time stamps. */ 413 /* Clear out any old time stamps. */
420 __raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED, 414 __raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
421 &regs->channel[ch].ch_event); 415 &regs->channel[ch].ch_event);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 9dccb1edfd2a..2a89da080317 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -628,6 +628,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
628 const struct iovec *iv, unsigned long total_len, 628 const struct iovec *iv, unsigned long total_len,
629 size_t count, int noblock) 629 size_t count, int noblock)
630{ 630{
631 int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
631 struct sk_buff *skb; 632 struct sk_buff *skb;
632 struct macvlan_dev *vlan; 633 struct macvlan_dev *vlan;
633 unsigned long len = total_len; 634 unsigned long len = total_len;
@@ -670,6 +671,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
670 671
671 if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) { 672 if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
672 copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN; 673 copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
674 if (copylen > good_linear)
675 copylen = good_linear;
673 linear = copylen; 676 linear = copylen;
674 if (iov_pages(iv, vnet_hdr_len + copylen, count) 677 if (iov_pages(iv, vnet_hdr_len + copylen, count)
675 <= MAX_SKB_FRAGS) 678 <= MAX_SKB_FRAGS)
@@ -678,7 +681,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
678 681
679 if (!zerocopy) { 682 if (!zerocopy) {
680 copylen = len; 683 copylen = len;
681 linear = vnet_hdr.hdr_len; 684 if (vnet_hdr.hdr_len > good_linear)
685 linear = good_linear;
686 else
687 linear = vnet_hdr.hdr_len;
682 } 688 }
683 689
684 skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen, 690 skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
@@ -738,7 +744,7 @@ err:
738 rcu_read_lock(); 744 rcu_read_lock();
739 vlan = rcu_dereference(q->vlan); 745 vlan = rcu_dereference(q->vlan);
740 if (vlan) 746 if (vlan)
741 vlan->dev->stats.tx_dropped++; 747 this_cpu_inc(vlan->pcpu_stats->tx_dropped);
742 rcu_read_unlock(); 748 rcu_read_unlock();
743 749
744 return err; 750 return err;
@@ -761,11 +767,10 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
761 const struct sk_buff *skb, 767 const struct sk_buff *skb,
762 const struct iovec *iv, int len) 768 const struct iovec *iv, int len)
763{ 769{
764 struct macvlan_dev *vlan;
765 int ret; 770 int ret;
766 int vnet_hdr_len = 0; 771 int vnet_hdr_len = 0;
767 int vlan_offset = 0; 772 int vlan_offset = 0;
768 int copied; 773 int copied, total;
769 774
770 if (q->flags & IFF_VNET_HDR) { 775 if (q->flags & IFF_VNET_HDR) {
771 struct virtio_net_hdr vnet_hdr; 776 struct virtio_net_hdr vnet_hdr;
@@ -780,7 +785,8 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
780 if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr))) 785 if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
781 return -EFAULT; 786 return -EFAULT;
782 } 787 }
783 copied = vnet_hdr_len; 788 total = copied = vnet_hdr_len;
789 total += skb->len;
784 790
785 if (!vlan_tx_tag_present(skb)) 791 if (!vlan_tx_tag_present(skb))
786 len = min_t(int, skb->len, len); 792 len = min_t(int, skb->len, len);
@@ -795,6 +801,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
795 801
796 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 802 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
797 len = min_t(int, skb->len + VLAN_HLEN, len); 803 len = min_t(int, skb->len + VLAN_HLEN, len);
804 total += VLAN_HLEN;
798 805
799 copy = min_t(int, vlan_offset, len); 806 copy = min_t(int, vlan_offset, len);
800 ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); 807 ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
@@ -812,19 +819,9 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
812 } 819 }
813 820
814 ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len); 821 ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
815 copied += len;
816 822
817done: 823done:
818 rcu_read_lock(); 824 return ret ? ret : total;
819 vlan = rcu_dereference(q->vlan);
820 if (vlan) {
821 preempt_disable();
822 macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
823 preempt_enable();
824 }
825 rcu_read_unlock();
826
827 return ret ? ret : copied;
828} 825}
829 826
830static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb, 827static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
@@ -879,7 +876,9 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
879 } 876 }
880 877
881 ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK); 878 ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
882 ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */ 879 ret = min_t(ssize_t, ret, len);
880 if (ret > 0)
881 iocb->ki_pos = ret;
883out: 882out:
884 return ret; 883 return ret;
885} 884}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 3ae28f420868..26fa05a472b4 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -336,6 +336,21 @@ static struct phy_driver ksphy_driver[] = {
336 .resume = genphy_resume, 336 .resume = genphy_resume,
337 .driver = { .owner = THIS_MODULE,}, 337 .driver = { .owner = THIS_MODULE,},
338}, { 338}, {
339 .phy_id = PHY_ID_KSZ8041RNLI,
340 .phy_id_mask = 0x00fffff0,
341 .name = "Micrel KSZ8041RNLI",
342 .features = PHY_BASIC_FEATURES |
343 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
344 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
345 .config_init = kszphy_config_init,
346 .config_aneg = genphy_config_aneg,
347 .read_status = genphy_read_status,
348 .ack_interrupt = kszphy_ack_interrupt,
349 .config_intr = kszphy_config_intr,
350 .suspend = genphy_suspend,
351 .resume = genphy_resume,
352 .driver = { .owner = THIS_MODULE,},
353}, {
339 .phy_id = PHY_ID_KSZ8051, 354 .phy_id = PHY_ID_KSZ8051,
340 .phy_id_mask = 0x00fffff0, 355 .phy_id_mask = 0x00fffff0,
341 .name = "Micrel KSZ8051", 356 .name = "Micrel KSZ8051",
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 74630e94fa3b..d6447b3f7409 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -697,7 +697,7 @@ static int genphy_config_advert(struct phy_device *phydev)
697 * to the values in phydev. Assumes that the values are valid. 697 * to the values in phydev. Assumes that the values are valid.
698 * Please see phy_sanitize_settings(). 698 * Please see phy_sanitize_settings().
699 */ 699 */
700static int genphy_setup_forced(struct phy_device *phydev) 700int genphy_setup_forced(struct phy_device *phydev)
701{ 701{
702 int err; 702 int err;
703 int ctl = 0; 703 int ctl = 0;
@@ -716,7 +716,7 @@ static int genphy_setup_forced(struct phy_device *phydev)
716 716
717 return err; 717 return err;
718} 718}
719 719EXPORT_SYMBOL(genphy_setup_forced);
720 720
721/** 721/**
722 * genphy_restart_aneg - Enable and Restart Autonegotiation 722 * genphy_restart_aneg - Enable and Restart Autonegotiation
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 69b482bce7d2..14372c65a7e8 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Author: Kriston Carson 4 * Author: Kriston Carson
5 * 5 *
6 * Copyright (c) 2005, 2009 Freescale Semiconductor, Inc. 6 * Copyright (c) 2005, 2009, 2011 Freescale Semiconductor, Inc.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the 9 * under the terms of the GNU General Public License as published by the
@@ -18,6 +18,11 @@
18#include <linux/ethtool.h> 18#include <linux/ethtool.h>
19#include <linux/phy.h> 19#include <linux/phy.h>
20 20
21/* Vitesse Extended Page Magic Register(s) */
22#define MII_VSC82X4_EXT_PAGE_16E 0x10
23#define MII_VSC82X4_EXT_PAGE_17E 0x11
24#define MII_VSC82X4_EXT_PAGE_18E 0x12
25
21/* Vitesse Extended Control Register 1 */ 26/* Vitesse Extended Control Register 1 */
22#define MII_VSC8244_EXT_CON1 0x17 27#define MII_VSC8244_EXT_CON1 0x17
23#define MII_VSC8244_EXTCON1_INIT 0x0000 28#define MII_VSC8244_EXTCON1_INIT 0x0000
@@ -54,7 +59,14 @@
54#define MII_VSC8221_AUXCONSTAT_INIT 0x0004 /* need to set this bit? */ 59#define MII_VSC8221_AUXCONSTAT_INIT 0x0004 /* need to set this bit? */
55#define MII_VSC8221_AUXCONSTAT_RESERVED 0x0004 60#define MII_VSC8221_AUXCONSTAT_RESERVED 0x0004
56 61
62/* Vitesse Extended Page Access Register */
63#define MII_VSC82X4_EXT_PAGE_ACCESS 0x1f
64
65#define PHY_ID_VSC8234 0x000fc620
57#define PHY_ID_VSC8244 0x000fc6c0 66#define PHY_ID_VSC8244 0x000fc6c0
67#define PHY_ID_VSC8514 0x00070670
68#define PHY_ID_VSC8574 0x000704a0
69#define PHY_ID_VSC8662 0x00070660
58#define PHY_ID_VSC8221 0x000fc550 70#define PHY_ID_VSC8221 0x000fc550
59#define PHY_ID_VSC8211 0x000fc4b0 71#define PHY_ID_VSC8211 0x000fc4b0
60 72
@@ -118,7 +130,10 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
118 130
119 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) 131 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
120 err = phy_write(phydev, MII_VSC8244_IMASK, 132 err = phy_write(phydev, MII_VSC8244_IMASK,
121 phydev->drv->phy_id == PHY_ID_VSC8244 ? 133 (phydev->drv->phy_id == PHY_ID_VSC8234 ||
134 phydev->drv->phy_id == PHY_ID_VSC8244 ||
135 phydev->drv->phy_id == PHY_ID_VSC8514 ||
136 phydev->drv->phy_id == PHY_ID_VSC8574) ?
122 MII_VSC8244_IMASK_MASK : 137 MII_VSC8244_IMASK_MASK :
123 MII_VSC8221_IMASK_MASK); 138 MII_VSC8221_IMASK_MASK);
124 else { 139 else {
@@ -149,21 +164,126 @@ static int vsc8221_config_init(struct phy_device *phydev)
149 */ 164 */
150} 165}
151 166
152/* Vitesse 824x */ 167/* vsc82x4_config_autocross_enable - Enable auto MDI/MDI-X for forced links
168 * @phydev: target phy_device struct
169 *
170 * Enable auto MDI/MDI-X when in 10/100 forced link speeds by writing
171 * special values in the VSC8234/VSC8244 extended reserved registers
172 */
173static int vsc82x4_config_autocross_enable(struct phy_device *phydev)
174{
175 int ret;
176
177 if (phydev->autoneg == AUTONEG_ENABLE || phydev->speed > SPEED_100)
178 return 0;
179
180 /* map extended registers set 0x10 - 0x1e */
181 ret = phy_write(phydev, MII_VSC82X4_EXT_PAGE_ACCESS, 0x52b5);
182 if (ret >= 0)
183 ret = phy_write(phydev, MII_VSC82X4_EXT_PAGE_18E, 0x0012);
184 if (ret >= 0)
185 ret = phy_write(phydev, MII_VSC82X4_EXT_PAGE_17E, 0x2803);
186 if (ret >= 0)
187 ret = phy_write(phydev, MII_VSC82X4_EXT_PAGE_16E, 0x87fa);
188 /* map standard registers set 0x10 - 0x1e */
189 if (ret >= 0)
190 ret = phy_write(phydev, MII_VSC82X4_EXT_PAGE_ACCESS, 0x0000);
191 else
192 phy_write(phydev, MII_VSC82X4_EXT_PAGE_ACCESS, 0x0000);
193
194 return ret;
195}
196
197/* vsc82x4_config_aneg - restart auto-negotiation or write BMCR
198 * @phydev: target phy_device struct
199 *
200 * Description: If auto-negotiation is enabled, we configure the
201 * advertising, and then restart auto-negotiation. If it is not
202 * enabled, then we write the BMCR and also start the auto
203 * MDI/MDI-X feature
204 */
205static int vsc82x4_config_aneg(struct phy_device *phydev)
206{
207 int ret;
208
209 /* Enable auto MDI/MDI-X when in 10/100 forced link speeds by
210 * writing special values in the VSC8234 extended reserved registers
211 */
212 if (phydev->autoneg != AUTONEG_ENABLE && phydev->speed <= SPEED_100) {
213 ret = genphy_setup_forced(phydev);
214
215 if (ret < 0) /* error */
216 return ret;
217
218 return vsc82x4_config_autocross_enable(phydev);
219 }
220
221 return genphy_config_aneg(phydev);
222}
223
224/* Vitesse 82xx */
153static struct phy_driver vsc82xx_driver[] = { 225static struct phy_driver vsc82xx_driver[] = {
154{ 226{
227 .phy_id = PHY_ID_VSC8234,
228 .name = "Vitesse VSC8234",
229 .phy_id_mask = 0x000ffff0,
230 .features = PHY_GBIT_FEATURES,
231 .flags = PHY_HAS_INTERRUPT,
232 .config_init = &vsc824x_config_init,
233 .config_aneg = &vsc82x4_config_aneg,
234 .read_status = &genphy_read_status,
235 .ack_interrupt = &vsc824x_ack_interrupt,
236 .config_intr = &vsc82xx_config_intr,
237 .driver = { .owner = THIS_MODULE,},
238}, {
155 .phy_id = PHY_ID_VSC8244, 239 .phy_id = PHY_ID_VSC8244,
156 .name = "Vitesse VSC8244", 240 .name = "Vitesse VSC8244",
157 .phy_id_mask = 0x000fffc0, 241 .phy_id_mask = 0x000fffc0,
158 .features = PHY_GBIT_FEATURES, 242 .features = PHY_GBIT_FEATURES,
159 .flags = PHY_HAS_INTERRUPT, 243 .flags = PHY_HAS_INTERRUPT,
160 .config_init = &vsc824x_config_init, 244 .config_init = &vsc824x_config_init,
161 .config_aneg = &genphy_config_aneg, 245 .config_aneg = &vsc82x4_config_aneg,
162 .read_status = &genphy_read_status, 246 .read_status = &genphy_read_status,
163 .ack_interrupt = &vsc824x_ack_interrupt, 247 .ack_interrupt = &vsc824x_ack_interrupt,
164 .config_intr = &vsc82xx_config_intr, 248 .config_intr = &vsc82xx_config_intr,
165 .driver = { .owner = THIS_MODULE,}, 249 .driver = { .owner = THIS_MODULE,},
166}, { 250}, {
251 .phy_id = PHY_ID_VSC8514,
252 .name = "Vitesse VSC8514",
253 .phy_id_mask = 0x000ffff0,
254 .features = PHY_GBIT_FEATURES,
255 .flags = PHY_HAS_INTERRUPT,
256 .config_init = &vsc824x_config_init,
257 .config_aneg = &vsc82x4_config_aneg,
258 .read_status = &genphy_read_status,
259 .ack_interrupt = &vsc824x_ack_interrupt,
260 .config_intr = &vsc82xx_config_intr,
261 .driver = { .owner = THIS_MODULE,},
262}, {
263 .phy_id = PHY_ID_VSC8574,
264 .name = "Vitesse VSC8574",
265 .phy_id_mask = 0x000ffff0,
266 .features = PHY_GBIT_FEATURES,
267 .flags = PHY_HAS_INTERRUPT,
268 .config_init = &vsc824x_config_init,
269 .config_aneg = &vsc82x4_config_aneg,
270 .read_status = &genphy_read_status,
271 .ack_interrupt = &vsc824x_ack_interrupt,
272 .config_intr = &vsc82xx_config_intr,
273 .driver = { .owner = THIS_MODULE,},
274}, {
275 .phy_id = PHY_ID_VSC8662,
276 .name = "Vitesse VSC8662",
277 .phy_id_mask = 0x000ffff0,
278 .features = PHY_GBIT_FEATURES,
279 .flags = PHY_HAS_INTERRUPT,
280 .config_init = &vsc824x_config_init,
281 .config_aneg = &vsc82x4_config_aneg,
282 .read_status = &genphy_read_status,
283 .ack_interrupt = &vsc824x_ack_interrupt,
284 .config_intr = &vsc82xx_config_intr,
285 .driver = { .owner = THIS_MODULE,},
286}, {
167 /* Vitesse 8221 */ 287 /* Vitesse 8221 */
168 .phy_id = PHY_ID_VSC8221, 288 .phy_id = PHY_ID_VSC8221,
169 .phy_id_mask = 0x000ffff0, 289 .phy_id_mask = 0x000ffff0,
@@ -207,7 +327,11 @@ module_init(vsc82xx_init);
207module_exit(vsc82xx_exit); 327module_exit(vsc82xx_exit);
208 328
209static struct mdio_device_id __maybe_unused vitesse_tbl[] = { 329static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
330 { PHY_ID_VSC8234, 0x000ffff0 },
210 { PHY_ID_VSC8244, 0x000fffc0 }, 331 { PHY_ID_VSC8244, 0x000fffc0 },
332 { PHY_ID_VSC8514, 0x000ffff0 },
333 { PHY_ID_VSC8574, 0x000ffff0 },
334 { PHY_ID_VSC8662, 0x000ffff0 },
211 { PHY_ID_VSC8221, 0x000ffff0 }, 335 { PHY_ID_VSC8221, 0x000ffff0 },
212 { PHY_ID_VSC8211, 0x000ffff0 }, 336 { PHY_ID_VSC8211, 0x000ffff0 },
213 { } 337 { }
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 5f66e30d9823..82ee6ed954cb 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -979,8 +979,6 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
979 if (error < 0) 979 if (error < 0)
980 goto end; 980 goto end;
981 981
982 m->msg_namelen = 0;
983
984 if (skb) { 982 if (skb) {
985 total_len = min_t(size_t, total_len, skb->len); 983 total_len = min_t(size_t, total_len, skb->len);
986 error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len); 984 error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 6574eb8766f9..736050d6b451 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1366,6 +1366,8 @@ static int team_user_linkup_option_get(struct team *team,
1366 return 0; 1366 return 0;
1367} 1367}
1368 1368
1369static void __team_carrier_check(struct team *team);
1370
1369static int team_user_linkup_option_set(struct team *team, 1371static int team_user_linkup_option_set(struct team *team,
1370 struct team_gsetter_ctx *ctx) 1372 struct team_gsetter_ctx *ctx)
1371{ 1373{
@@ -1373,6 +1375,7 @@ static int team_user_linkup_option_set(struct team *team,
1373 1375
1374 port->user.linkup = ctx->data.bool_val; 1376 port->user.linkup = ctx->data.bool_val;
1375 team_refresh_port_linkup(port); 1377 team_refresh_port_linkup(port);
1378 __team_carrier_check(port->team);
1376 return 0; 1379 return 0;
1377} 1380}
1378 1381
@@ -1392,6 +1395,7 @@ static int team_user_linkup_en_option_set(struct team *team,
1392 1395
1393 port->user.linkup_enabled = ctx->data.bool_val; 1396 port->user.linkup_enabled = ctx->data.bool_val;
1394 team_refresh_port_linkup(port); 1397 team_refresh_port_linkup(port);
1398 __team_carrier_check(port->team);
1395 return 0; 1399 return 0;
1396} 1400}
1397 1401
@@ -2650,7 +2654,7 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb,
2650 return err; 2654 return err;
2651} 2655}
2652 2656
2653static struct genl_ops team_nl_ops[] = { 2657static const struct genl_ops team_nl_ops[] = {
2654 { 2658 {
2655 .cmd = TEAM_CMD_NOOP, 2659 .cmd = TEAM_CMD_NOOP,
2656 .doit = team_nl_cmd_noop, 2660 .doit = team_nl_cmd_noop,
@@ -2676,15 +2680,15 @@ static struct genl_ops team_nl_ops[] = {
2676 }, 2680 },
2677}; 2681};
2678 2682
2679static struct genl_multicast_group team_change_event_mcgrp = { 2683static const struct genl_multicast_group team_nl_mcgrps[] = {
2680 .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, 2684 { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
2681}; 2685};
2682 2686
2683static int team_nl_send_multicast(struct sk_buff *skb, 2687static int team_nl_send_multicast(struct sk_buff *skb,
2684 struct team *team, u32 portid) 2688 struct team *team, u32 portid)
2685{ 2689{
2686 return genlmsg_multicast_netns(dev_net(team->dev), skb, 0, 2690 return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
2687 team_change_event_mcgrp.id, GFP_KERNEL); 2691 skb, 0, 0, GFP_KERNEL);
2688} 2692}
2689 2693
2690static int team_nl_send_event_options_get(struct team *team, 2694static int team_nl_send_event_options_get(struct team *team,
@@ -2703,23 +2707,8 @@ static int team_nl_send_event_port_get(struct team *team,
2703 2707
2704static int team_nl_init(void) 2708static int team_nl_init(void)
2705{ 2709{
2706 int err; 2710 return genl_register_family_with_ops_groups(&team_nl_family, team_nl_ops,
2707 2711 team_nl_mcgrps);
2708 err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
2709 ARRAY_SIZE(team_nl_ops));
2710 if (err)
2711 return err;
2712
2713 err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
2714 if (err)
2715 goto err_change_event_grp_reg;
2716
2717 return 0;
2718
2719err_change_event_grp_reg:
2720 genl_unregister_family(&team_nl_family);
2721
2722 return err;
2723} 2712}
2724 2713
2725static void team_nl_fini(void) 2714static void team_nl_fini(void)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7cb105c103fe..7c8343a4f918 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -981,6 +981,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
981 struct sk_buff *skb; 981 struct sk_buff *skb;
982 size_t len = total_len, align = NET_SKB_PAD, linear; 982 size_t len = total_len, align = NET_SKB_PAD, linear;
983 struct virtio_net_hdr gso = { 0 }; 983 struct virtio_net_hdr gso = { 0 };
984 int good_linear;
984 int offset = 0; 985 int offset = 0;
985 int copylen; 986 int copylen;
986 bool zerocopy = false; 987 bool zerocopy = false;
@@ -1021,12 +1022,16 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1021 return -EINVAL; 1022 return -EINVAL;
1022 } 1023 }
1023 1024
1025 good_linear = SKB_MAX_HEAD(align);
1026
1024 if (msg_control) { 1027 if (msg_control) {
1025 /* There are 256 bytes to be copied in skb, so there is 1028 /* There are 256 bytes to be copied in skb, so there is
1026 * enough room for skb expand head in case it is used. 1029 * enough room for skb expand head in case it is used.
1027 * The rest of the buffer is mapped from userspace. 1030 * The rest of the buffer is mapped from userspace.
1028 */ 1031 */
1029 copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN; 1032 copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
1033 if (copylen > good_linear)
1034 copylen = good_linear;
1030 linear = copylen; 1035 linear = copylen;
1031 if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS) 1036 if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
1032 zerocopy = true; 1037 zerocopy = true;
@@ -1034,7 +1039,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1034 1039
1035 if (!zerocopy) { 1040 if (!zerocopy) {
1036 copylen = len; 1041 copylen = len;
1037 linear = gso.hdr_len; 1042 if (gso.hdr_len > good_linear)
1043 linear = good_linear;
1044 else
1045 linear = gso.hdr_len;
1038 } 1046 }
1039 1047
1040 skb = tun_alloc_skb(tfile, align, copylen, linear, noblock); 1048 skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
@@ -1176,7 +1184,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1176{ 1184{
1177 struct tun_pi pi = { 0, skb->protocol }; 1185 struct tun_pi pi = { 0, skb->protocol };
1178 ssize_t total = 0; 1186 ssize_t total = 0;
1179 int vlan_offset = 0; 1187 int vlan_offset = 0, copied;
1180 1188
1181 if (!(tun->flags & TUN_NO_PI)) { 1189 if (!(tun->flags & TUN_NO_PI)) {
1182 if ((len -= sizeof(pi)) < 0) 1190 if ((len -= sizeof(pi)) < 0)
@@ -1240,6 +1248,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1240 total += tun->vnet_hdr_sz; 1248 total += tun->vnet_hdr_sz;
1241 } 1249 }
1242 1250
1251 copied = total;
1252 total += skb->len;
1243 if (!vlan_tx_tag_present(skb)) { 1253 if (!vlan_tx_tag_present(skb)) {
1244 len = min_t(int, skb->len, len); 1254 len = min_t(int, skb->len, len);
1245 } else { 1255 } else {
@@ -1254,24 +1264,24 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1254 1264
1255 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 1265 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
1256 len = min_t(int, skb->len + VLAN_HLEN, len); 1266 len = min_t(int, skb->len + VLAN_HLEN, len);
1267 total += VLAN_HLEN;
1257 1268
1258 copy = min_t(int, vlan_offset, len); 1269 copy = min_t(int, vlan_offset, len);
1259 ret = skb_copy_datagram_const_iovec(skb, 0, iv, total, copy); 1270 ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
1260 len -= copy; 1271 len -= copy;
1261 total += copy; 1272 copied += copy;
1262 if (ret || !len) 1273 if (ret || !len)
1263 goto done; 1274 goto done;
1264 1275
1265 copy = min_t(int, sizeof(veth), len); 1276 copy = min_t(int, sizeof(veth), len);
1266 ret = memcpy_toiovecend(iv, (void *)&veth, total, copy); 1277 ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
1267 len -= copy; 1278 len -= copy;
1268 total += copy; 1279 copied += copy;
1269 if (ret || !len) 1280 if (ret || !len)
1270 goto done; 1281 goto done;
1271 } 1282 }
1272 1283
1273 skb_copy_datagram_const_iovec(skb, vlan_offset, iv, total, len); 1284 skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
1274 total += len;
1275 1285
1276done: 1286done:
1277 tun->dev->stats.tx_packets++; 1287 tun->dev->stats.tx_packets++;
@@ -1348,6 +1358,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
1348 ret = tun_do_read(tun, tfile, iocb, iv, len, 1358 ret = tun_do_read(tun, tfile, iocb, iv, len,
1349 file->f_flags & O_NONBLOCK); 1359 file->f_flags & O_NONBLOCK);
1350 ret = min_t(ssize_t, ret, len); 1360 ret = min_t(ssize_t, ret, len);
1361 if (ret > 0)
1362 iocb->ki_pos = ret;
1351out: 1363out:
1352 tun_put(tun); 1364 tun_put(tun);
1353 return ret; 1365 return ret;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index f74786aa37be..e15ec2b12035 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -66,7 +66,7 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
66static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); 66static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
67static struct usb_driver cdc_ncm_driver; 67static struct usb_driver cdc_ncm_driver;
68 68
69static u8 cdc_ncm_setup(struct usbnet *dev) 69static int cdc_ncm_setup(struct usbnet *dev)
70{ 70{
71 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; 71 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
72 struct usb_cdc_ncm_ntb_parameters ncm_parm; 72 struct usb_cdc_ncm_ntb_parameters ncm_parm;
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index f3fce412c0c1..51073721e224 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -24,7 +24,7 @@
24#include <linux/ipv6.h> 24#include <linux/ipv6.h>
25 25
26/* Version Information */ 26/* Version Information */
27#define DRIVER_VERSION "v1.01.0 (2013/08/12)" 27#define DRIVER_VERSION "v1.02.0 (2013/10/28)"
28#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" 28#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
29#define DRIVER_DESC "Realtek RTL8152 Based USB 2.0 Ethernet Adapters" 29#define DRIVER_DESC "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
30#define MODULENAME "r8152" 30#define MODULENAME "r8152"
@@ -307,22 +307,22 @@ enum rtl8152_flags {
307#define MCU_TYPE_USB 0x0000 307#define MCU_TYPE_USB 0x0000
308 308
309struct rx_desc { 309struct rx_desc {
310 u32 opts1; 310 __le32 opts1;
311#define RX_LEN_MASK 0x7fff 311#define RX_LEN_MASK 0x7fff
312 u32 opts2; 312 __le32 opts2;
313 u32 opts3; 313 __le32 opts3;
314 u32 opts4; 314 __le32 opts4;
315 u32 opts5; 315 __le32 opts5;
316 u32 opts6; 316 __le32 opts6;
317}; 317};
318 318
319struct tx_desc { 319struct tx_desc {
320 u32 opts1; 320 __le32 opts1;
321#define TX_FS (1 << 31) /* First segment of a packet */ 321#define TX_FS (1 << 31) /* First segment of a packet */
322#define TX_LS (1 << 30) /* Final segment of a packet */ 322#define TX_LS (1 << 30) /* Final segment of a packet */
323#define TX_LEN_MASK 0x3ffff 323#define TX_LEN_MASK 0x3ffff
324 324
325 u32 opts2; 325 __le32 opts2;
326#define UDP_CS (1 << 31) /* Calculate UDP/IP checksum */ 326#define UDP_CS (1 << 31) /* Calculate UDP/IP checksum */
327#define TCP_CS (1 << 30) /* Calculate TCP/IP checksum */ 327#define TCP_CS (1 << 30) /* Calculate TCP/IP checksum */
328#define IPV4_CS (1 << 29) /* Calculate IPv4 checksum */ 328#define IPV4_CS (1 << 29) /* Calculate IPv4 checksum */
@@ -365,6 +365,7 @@ struct r8152 {
365 struct mii_if_info mii; 365 struct mii_if_info mii;
366 int intr_interval; 366 int intr_interval;
367 u32 msg_enable; 367 u32 msg_enable;
368 u32 tx_qlen;
368 u16 ocp_base; 369 u16 ocp_base;
369 u8 *intr_buff; 370 u8 *intr_buff;
370 u8 version; 371 u8 version;
@@ -876,7 +877,7 @@ static void write_bulk_callback(struct urb *urb)
876static void intr_callback(struct urb *urb) 877static void intr_callback(struct urb *urb)
877{ 878{
878 struct r8152 *tp; 879 struct r8152 *tp;
879 __u16 *d; 880 __le16 *d;
880 int status = urb->status; 881 int status = urb->status;
881 int res; 882 int res;
882 883
@@ -1136,14 +1137,14 @@ r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
1136 1137
1137static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) 1138static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
1138{ 1139{
1139 u32 remain; 1140 int remain;
1140 u8 *tx_data; 1141 u8 *tx_data;
1141 1142
1142 tx_data = agg->head; 1143 tx_data = agg->head;
1143 agg->skb_num = agg->skb_len = 0; 1144 agg->skb_num = agg->skb_len = 0;
1144 remain = rx_buf_sz - sizeof(struct tx_desc); 1145 remain = rx_buf_sz;
1145 1146
1146 while (remain >= ETH_ZLEN) { 1147 while (remain >= ETH_ZLEN + sizeof(struct tx_desc)) {
1147 struct tx_desc *tx_desc; 1148 struct tx_desc *tx_desc;
1148 struct sk_buff *skb; 1149 struct sk_buff *skb;
1149 unsigned int len; 1150 unsigned int len;
@@ -1152,12 +1153,14 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
1152 if (!skb) 1153 if (!skb)
1153 break; 1154 break;
1154 1155
1156 remain -= sizeof(*tx_desc);
1155 len = skb->len; 1157 len = skb->len;
1156 if (remain < len) { 1158 if (remain < len) {
1157 skb_queue_head(&tp->tx_queue, skb); 1159 skb_queue_head(&tp->tx_queue, skb);
1158 break; 1160 break;
1159 } 1161 }
1160 1162
1163 tx_data = tx_agg_align(tx_data);
1161 tx_desc = (struct tx_desc *)tx_data; 1164 tx_desc = (struct tx_desc *)tx_data;
1162 tx_data += sizeof(*tx_desc); 1165 tx_data += sizeof(*tx_desc);
1163 1166
@@ -1167,11 +1170,18 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
1167 agg->skb_len += len; 1170 agg->skb_len += len;
1168 dev_kfree_skb_any(skb); 1171 dev_kfree_skb_any(skb);
1169 1172
1170 tx_data = tx_agg_align(tx_data + len); 1173 tx_data += len;
1171 remain = rx_buf_sz - sizeof(*tx_desc) - 1174 remain = rx_buf_sz - (int)(tx_agg_align(tx_data) - agg->head);
1172 (u32)((void *)tx_data - agg->head);
1173 } 1175 }
1174 1176
1177 netif_tx_lock(tp->netdev);
1178
1179 if (netif_queue_stopped(tp->netdev) &&
1180 skb_queue_len(&tp->tx_queue) < tp->tx_qlen)
1181 netif_wake_queue(tp->netdev);
1182
1183 netif_tx_unlock(tp->netdev);
1184
1175 usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2), 1185 usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
1176 agg->head, (int)(tx_data - (u8 *)agg->head), 1186 agg->head, (int)(tx_data - (u8 *)agg->head),
1177 (usb_complete_t)write_bulk_callback, agg); 1187 (usb_complete_t)write_bulk_callback, agg);
@@ -1188,7 +1198,6 @@ static void rx_bottom(struct r8152 *tp)
1188 list_for_each_safe(cursor, next, &tp->rx_done) { 1198 list_for_each_safe(cursor, next, &tp->rx_done) {
1189 struct rx_desc *rx_desc; 1199 struct rx_desc *rx_desc;
1190 struct rx_agg *agg; 1200 struct rx_agg *agg;
1191 unsigned pkt_len;
1192 int len_used = 0; 1201 int len_used = 0;
1193 struct urb *urb; 1202 struct urb *urb;
1194 u8 *rx_data; 1203 u8 *rx_data;
@@ -1204,17 +1213,22 @@ static void rx_bottom(struct r8152 *tp)
1204 1213
1205 rx_desc = agg->head; 1214 rx_desc = agg->head;
1206 rx_data = agg->head; 1215 rx_data = agg->head;
1207 pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK; 1216 len_used += sizeof(struct rx_desc);
1208 len_used += sizeof(struct rx_desc) + pkt_len;
1209 1217
1210 while (urb->actual_length >= len_used) { 1218 while (urb->actual_length > len_used) {
1211 struct net_device *netdev = tp->netdev; 1219 struct net_device *netdev = tp->netdev;
1212 struct net_device_stats *stats; 1220 struct net_device_stats *stats;
1221 unsigned int pkt_len;
1213 struct sk_buff *skb; 1222 struct sk_buff *skb;
1214 1223
1224 pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
1215 if (pkt_len < ETH_ZLEN) 1225 if (pkt_len < ETH_ZLEN)
1216 break; 1226 break;
1217 1227
1228 len_used += pkt_len;
1229 if (urb->actual_length < len_used)
1230 break;
1231
1218 stats = rtl8152_get_stats(netdev); 1232 stats = rtl8152_get_stats(netdev);
1219 1233
1220 pkt_len -= 4; /* CRC */ 1234 pkt_len -= 4; /* CRC */
@@ -1234,9 +1248,8 @@ static void rx_bottom(struct r8152 *tp)
1234 1248
1235 rx_data = rx_agg_align(rx_data + pkt_len + 4); 1249 rx_data = rx_agg_align(rx_data + pkt_len + 4);
1236 rx_desc = (struct rx_desc *)rx_data; 1250 rx_desc = (struct rx_desc *)rx_data;
1237 pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
1238 len_used = (int)(rx_data - (u8 *)agg->head); 1251 len_used = (int)(rx_data - (u8 *)agg->head);
1239 len_used += sizeof(struct rx_desc) + pkt_len; 1252 len_used += sizeof(struct rx_desc);
1240 } 1253 }
1241 1254
1242submit: 1255submit:
@@ -1384,53 +1397,17 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
1384 struct net_device *netdev) 1397 struct net_device *netdev)
1385{ 1398{
1386 struct r8152 *tp = netdev_priv(netdev); 1399 struct r8152 *tp = netdev_priv(netdev);
1387 struct net_device_stats *stats = rtl8152_get_stats(netdev);
1388 unsigned long flags;
1389 struct tx_agg *agg = NULL;
1390 struct tx_desc *tx_desc;
1391 unsigned int len;
1392 u8 *tx_data;
1393 int res;
1394 1400
1395 skb_tx_timestamp(skb); 1401 skb_tx_timestamp(skb);
1396 1402
1397 /* If tx_queue is not empty, it means at least one previous packt */ 1403 skb_queue_tail(&tp->tx_queue, skb);
1398 /* is waiting for sending. Don't send current one before it. */
1399 if (skb_queue_empty(&tp->tx_queue))
1400 agg = r8152_get_tx_agg(tp);
1401
1402 if (!agg) {
1403 skb_queue_tail(&tp->tx_queue, skb);
1404 return NETDEV_TX_OK;
1405 }
1406 1404
1407 tx_desc = (struct tx_desc *)agg->head; 1405 if (list_empty(&tp->tx_free) &&
1408 tx_data = agg->head + sizeof(*tx_desc); 1406 skb_queue_len(&tp->tx_queue) > tp->tx_qlen)
1409 agg->skb_num = agg->skb_len = 0; 1407 netif_stop_queue(netdev);
1410 1408
1411 len = skb->len; 1409 if (!list_empty(&tp->tx_free))
1412 r8152_tx_csum(tp, tx_desc, skb); 1410 tasklet_schedule(&tp->tl);
1413 memcpy(tx_data, skb->data, len);
1414 dev_kfree_skb_any(skb);
1415 agg->skb_num++;
1416 agg->skb_len += len;
1417 usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
1418 agg->head, len + sizeof(*tx_desc),
1419 (usb_complete_t)write_bulk_callback, agg);
1420 res = usb_submit_urb(agg->urb, GFP_ATOMIC);
1421 if (res) {
1422 /* Can we get/handle EPIPE here? */
1423 if (res == -ENODEV) {
1424 netif_device_detach(tp->netdev);
1425 } else {
1426 netif_warn(tp, tx_err, netdev,
1427 "failed tx_urb %d\n", res);
1428 stats->tx_dropped++;
1429 spin_lock_irqsave(&tp->tx_lock, flags);
1430 list_add_tail(&agg->list, &tp->tx_free);
1431 spin_unlock_irqrestore(&tp->tx_lock, flags);
1432 }
1433 }
1434 1411
1435 return NETDEV_TX_OK; 1412 return NETDEV_TX_OK;
1436} 1413}
@@ -1459,6 +1436,14 @@ static void rtl8152_nic_reset(struct r8152 *tp)
1459 } 1436 }
1460} 1437}
1461 1438
1439static void set_tx_qlen(struct r8152 *tp)
1440{
1441 struct net_device *netdev = tp->netdev;
1442
1443 tp->tx_qlen = rx_buf_sz / (netdev->mtu + VLAN_ETH_HLEN + VLAN_HLEN +
1444 sizeof(struct tx_desc));
1445}
1446
1462static inline u8 rtl8152_get_speed(struct r8152 *tp) 1447static inline u8 rtl8152_get_speed(struct r8152 *tp)
1463{ 1448{
1464 return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS); 1449 return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS);
@@ -1470,6 +1455,7 @@ static int rtl8152_enable(struct r8152 *tp)
1470 int i, ret; 1455 int i, ret;
1471 u8 speed; 1456 u8 speed;
1472 1457
1458 set_tx_qlen(tp);
1473 speed = rtl8152_get_speed(tp); 1459 speed = rtl8152_get_speed(tp);
1474 if (speed & _10bps) { 1460 if (speed & _10bps) {
1475 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR); 1461 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 90a429b7ebad..8494bb53ebdc 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -204,9 +204,6 @@ static void intr_complete (struct urb *urb)
204 break; 204 break;
205 } 205 }
206 206
207 if (!netif_running (dev->net))
208 return;
209
210 status = usb_submit_urb (urb, GFP_ATOMIC); 207 status = usb_submit_urb (urb, GFP_ATOMIC);
211 if (status != 0) 208 if (status != 0)
212 netif_err(dev, timer, dev->net, 209 netif_err(dev, timer, dev->net,
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index cdc7c90a6a9e..d208f8604981 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -36,7 +36,10 @@ module_param(csum, bool, 0444);
36module_param(gso, bool, 0444); 36module_param(gso, bool, 0444);
37 37
38/* FIXME: MTU in config. */ 38/* FIXME: MTU in config. */
39#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) 39#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
40#define MERGE_BUFFER_LEN (ALIGN(GOOD_PACKET_LEN + \
41 sizeof(struct virtio_net_hdr_mrg_rxbuf), \
42 L1_CACHE_BYTES))
40#define GOOD_COPY_LEN 128 43#define GOOD_COPY_LEN 128
41 44
42#define VIRTNET_DRIVER_VERSION "1.0.0" 45#define VIRTNET_DRIVER_VERSION "1.0.0"
@@ -296,35 +299,76 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
296 return skb; 299 return skb;
297} 300}
298 301
299static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) 302static struct sk_buff *receive_small(void *buf, unsigned int len)
300{ 303{
301 struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb); 304 struct sk_buff * skb = buf;
305
306 len -= sizeof(struct virtio_net_hdr);
307 skb_trim(skb, len);
308
309 return skb;
310}
311
312static struct sk_buff *receive_big(struct net_device *dev,
313 struct receive_queue *rq,
314 void *buf,
315 unsigned int len)
316{
317 struct page *page = buf;
318 struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
319
320 if (unlikely(!skb))
321 goto err;
322
323 return skb;
324
325err:
326 dev->stats.rx_dropped++;
327 give_pages(rq, page);
328 return NULL;
329}
330
331static struct sk_buff *receive_mergeable(struct net_device *dev,
332 struct receive_queue *rq,
333 void *buf,
334 unsigned int len)
335{
336 struct skb_vnet_hdr *hdr = buf;
337 int num_buf = hdr->mhdr.num_buffers;
338 struct page *page = virt_to_head_page(buf);
339 int offset = buf - page_address(page);
340 struct sk_buff *head_skb = page_to_skb(rq, page, offset, len,
341 MERGE_BUFFER_LEN);
302 struct sk_buff *curr_skb = head_skb; 342 struct sk_buff *curr_skb = head_skb;
303 char *buf;
304 struct page *page;
305 int num_buf, len, offset;
306 343
307 num_buf = hdr->mhdr.num_buffers; 344 if (unlikely(!curr_skb))
345 goto err_skb;
346
308 while (--num_buf) { 347 while (--num_buf) {
309 int num_skb_frags = skb_shinfo(curr_skb)->nr_frags; 348 int num_skb_frags;
349
310 buf = virtqueue_get_buf(rq->vq, &len); 350 buf = virtqueue_get_buf(rq->vq, &len);
311 if (unlikely(!buf)) { 351 if (unlikely(!buf)) {
312 pr_debug("%s: rx error: %d buffers missing\n", 352 pr_debug("%s: rx error: %d buffers out of %d missing\n",
313 head_skb->dev->name, hdr->mhdr.num_buffers); 353 dev->name, num_buf, hdr->mhdr.num_buffers);
314 head_skb->dev->stats.rx_length_errors++; 354 dev->stats.rx_length_errors++;
315 return -EINVAL; 355 goto err_buf;
316 } 356 }
317 if (unlikely(len > MAX_PACKET_LEN)) { 357 if (unlikely(len > MERGE_BUFFER_LEN)) {
318 pr_debug("%s: rx error: merge buffer too long\n", 358 pr_debug("%s: rx error: merge buffer too long\n",
319 head_skb->dev->name); 359 dev->name);
320 len = MAX_PACKET_LEN; 360 len = MERGE_BUFFER_LEN;
321 } 361 }
362
363 page = virt_to_head_page(buf);
364 --rq->num;
365
366 num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
322 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { 367 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
323 struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); 368 struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
324 if (unlikely(!nskb)) { 369
325 head_skb->dev->stats.rx_dropped++; 370 if (unlikely(!nskb))
326 return -ENOMEM; 371 goto err_skb;
327 }
328 if (curr_skb == head_skb) 372 if (curr_skb == head_skb)
329 skb_shinfo(curr_skb)->frag_list = nskb; 373 skb_shinfo(curr_skb)->frag_list = nskb;
330 else 374 else
@@ -336,22 +380,39 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
336 if (curr_skb != head_skb) { 380 if (curr_skb != head_skb) {
337 head_skb->data_len += len; 381 head_skb->data_len += len;
338 head_skb->len += len; 382 head_skb->len += len;
339 head_skb->truesize += MAX_PACKET_LEN; 383 head_skb->truesize += MERGE_BUFFER_LEN;
340 } 384 }
341 page = virt_to_head_page(buf); 385 offset = buf - page_address(page);
342 offset = buf - (char *)page_address(page);
343 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { 386 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
344 put_page(page); 387 put_page(page);
345 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, 388 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
346 len, MAX_PACKET_LEN); 389 len, MERGE_BUFFER_LEN);
347 } else { 390 } else {
348 skb_add_rx_frag(curr_skb, num_skb_frags, page, 391 skb_add_rx_frag(curr_skb, num_skb_frags, page,
349 offset, len, 392 offset, len, MERGE_BUFFER_LEN);
350 MAX_PACKET_LEN);
351 } 393 }
394 }
395
396 return head_skb;
397
398err_skb:
399 put_page(page);
400 while (--num_buf) {
401 buf = virtqueue_get_buf(rq->vq, &len);
402 if (unlikely(!buf)) {
403 pr_debug("%s: rx error: %d buffers missing\n",
404 dev->name, num_buf);
405 dev->stats.rx_length_errors++;
406 break;
407 }
408 page = virt_to_head_page(buf);
409 put_page(page);
352 --rq->num; 410 --rq->num;
353 } 411 }
354 return 0; 412err_buf:
413 dev->stats.rx_dropped++;
414 dev_kfree_skb(head_skb);
415 return NULL;
355} 416}
356 417
357static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) 418static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
@@ -360,48 +421,29 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
360 struct net_device *dev = vi->dev; 421 struct net_device *dev = vi->dev;
361 struct virtnet_stats *stats = this_cpu_ptr(vi->stats); 422 struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
362 struct sk_buff *skb; 423 struct sk_buff *skb;
363 struct page *page;
364 struct skb_vnet_hdr *hdr; 424 struct skb_vnet_hdr *hdr;
365 425
366 if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { 426 if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
367 pr_debug("%s: short packet %i\n", dev->name, len); 427 pr_debug("%s: short packet %i\n", dev->name, len);
368 dev->stats.rx_length_errors++; 428 dev->stats.rx_length_errors++;
369 if (vi->big_packets) 429 if (vi->mergeable_rx_bufs)
370 give_pages(rq, buf);
371 else if (vi->mergeable_rx_bufs)
372 put_page(virt_to_head_page(buf)); 430 put_page(virt_to_head_page(buf));
431 else if (vi->big_packets)
432 give_pages(rq, buf);
373 else 433 else
374 dev_kfree_skb(buf); 434 dev_kfree_skb(buf);
375 return; 435 return;
376 } 436 }
377 437
378 if (!vi->mergeable_rx_bufs && !vi->big_packets) { 438 if (vi->mergeable_rx_bufs)
379 skb = buf; 439 skb = receive_mergeable(dev, rq, buf, len);
380 len -= sizeof(struct virtio_net_hdr); 440 else if (vi->big_packets)
381 skb_trim(skb, len); 441 skb = receive_big(dev, rq, buf, len);
382 } else if (vi->mergeable_rx_bufs) { 442 else
383 struct page *page = virt_to_head_page(buf); 443 skb = receive_small(buf, len);
384 skb = page_to_skb(rq, page, 444
385 (char *)buf - (char *)page_address(page), 445 if (unlikely(!skb))
386 len, MAX_PACKET_LEN); 446 return;
387 if (unlikely(!skb)) {
388 dev->stats.rx_dropped++;
389 put_page(page);
390 return;
391 }
392 if (receive_mergeable(rq, skb)) {
393 dev_kfree_skb(skb);
394 return;
395 }
396 } else {
397 page = buf;
398 skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
399 if (unlikely(!skb)) {
400 dev->stats.rx_dropped++;
401 give_pages(rq, page);
402 return;
403 }
404 }
405 447
406 hdr = skb_vnet_hdr(skb); 448 hdr = skb_vnet_hdr(skb);
407 449
@@ -471,11 +513,11 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
471 struct skb_vnet_hdr *hdr; 513 struct skb_vnet_hdr *hdr;
472 int err; 514 int err;
473 515
474 skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp); 516 skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
475 if (unlikely(!skb)) 517 if (unlikely(!skb))
476 return -ENOMEM; 518 return -ENOMEM;
477 519
478 skb_put(skb, MAX_PACKET_LEN); 520 skb_put(skb, GOOD_PACKET_LEN);
479 521
480 hdr = skb_vnet_hdr(skb); 522 hdr = skb_vnet_hdr(skb);
481 sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr); 523 sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);
@@ -542,20 +584,20 @@ static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
542 int err; 584 int err;
543 585
544 if (gfp & __GFP_WAIT) { 586 if (gfp & __GFP_WAIT) {
545 if (skb_page_frag_refill(MAX_PACKET_LEN, &vi->alloc_frag, 587 if (skb_page_frag_refill(MERGE_BUFFER_LEN, &vi->alloc_frag,
546 gfp)) { 588 gfp)) {
547 buf = (char *)page_address(vi->alloc_frag.page) + 589 buf = (char *)page_address(vi->alloc_frag.page) +
548 vi->alloc_frag.offset; 590 vi->alloc_frag.offset;
549 get_page(vi->alloc_frag.page); 591 get_page(vi->alloc_frag.page);
550 vi->alloc_frag.offset += MAX_PACKET_LEN; 592 vi->alloc_frag.offset += MERGE_BUFFER_LEN;
551 } 593 }
552 } else { 594 } else {
553 buf = netdev_alloc_frag(MAX_PACKET_LEN); 595 buf = netdev_alloc_frag(MERGE_BUFFER_LEN);
554 } 596 }
555 if (!buf) 597 if (!buf)
556 return -ENOMEM; 598 return -ENOMEM;
557 599
558 sg_init_one(rq->sg, buf, MAX_PACKET_LEN); 600 sg_init_one(rq->sg, buf, MERGE_BUFFER_LEN);
559 err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp); 601 err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
560 if (err < 0) 602 if (err < 0)
561 put_page(virt_to_head_page(buf)); 603 put_page(virt_to_head_page(buf));
@@ -1082,7 +1124,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
1082 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 1124 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
1083 VIRTIO_NET_CTRL_MAC_TABLE_SET, 1125 VIRTIO_NET_CTRL_MAC_TABLE_SET,
1084 sg, NULL)) 1126 sg, NULL))
1085 dev_warn(&dev->dev, "Failed to set MAC fitler table.\n"); 1127 dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
1086 1128
1087 kfree(buf); 1129 kfree(buf);
1088} 1130}
@@ -1325,6 +1367,11 @@ static void virtnet_config_changed(struct virtio_device *vdev)
1325 1367
1326static void virtnet_free_queues(struct virtnet_info *vi) 1368static void virtnet_free_queues(struct virtnet_info *vi)
1327{ 1369{
1370 int i;
1371
1372 for (i = 0; i < vi->max_queue_pairs; i++)
1373 netif_napi_del(&vi->rq[i].napi);
1374
1328 kfree(vi->rq); 1375 kfree(vi->rq);
1329 kfree(vi->sq); 1376 kfree(vi->sq);
1330} 1377}
@@ -1354,10 +1401,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
1354 struct virtqueue *vq = vi->rq[i].vq; 1401 struct virtqueue *vq = vi->rq[i].vq;
1355 1402
1356 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { 1403 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
1357 if (vi->big_packets) 1404 if (vi->mergeable_rx_bufs)
1358 give_pages(&vi->rq[i], buf);
1359 else if (vi->mergeable_rx_bufs)
1360 put_page(virt_to_head_page(buf)); 1405 put_page(virt_to_head_page(buf));
1406 else if (vi->big_packets)
1407 give_pages(&vi->rq[i], buf);
1361 else 1408 else
1362 dev_kfree_skb(buf); 1409 dev_kfree_skb(buf);
1363 --vi->rq[i].num; 1410 --vi->rq[i].num;
@@ -1619,8 +1666,8 @@ static int virtnet_probe(struct virtio_device *vdev)
1619 if (err) 1666 if (err)
1620 goto free_stats; 1667 goto free_stats;
1621 1668
1622 netif_set_real_num_tx_queues(dev, 1); 1669 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
1623 netif_set_real_num_rx_queues(dev, 1); 1670 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
1624 1671
1625 err = register_netdev(dev); 1672 err = register_netdev(dev);
1626 if (err) { 1673 if (err) {
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 0358c07f7669..249e01c5600c 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1668,7 +1668,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1668 netdev_dbg(dev, "circular route to %pI4\n", 1668 netdev_dbg(dev, "circular route to %pI4\n",
1669 &dst->sin.sin_addr.s_addr); 1669 &dst->sin.sin_addr.s_addr);
1670 dev->stats.collisions++; 1670 dev->stats.collisions++;
1671 goto tx_error; 1671 goto rt_tx_error;
1672 } 1672 }
1673 1673
1674 /* Bypass encapsulation if the destination is local */ 1674 /* Bypass encapsulation if the destination is local */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 1ec52356b5a1..130657db5c43 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3984,18 +3984,20 @@ static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq)
3984 int quick_drop; 3984 int quick_drop;
3985 s32 t[3], f[3] = {5180, 5500, 5785}; 3985 s32 t[3], f[3] = {5180, 5500, 5785};
3986 3986
3987 if (!(pBase->miscConfiguration & BIT(1))) 3987 if (!(pBase->miscConfiguration & BIT(4)))
3988 return; 3988 return;
3989 3989
3990 if (freq < 4000) 3990 if (AR_SREV_9300(ah) || AR_SREV_9580(ah) || AR_SREV_9340(ah)) {
3991 quick_drop = eep->modalHeader2G.quick_drop; 3991 if (freq < 4000) {
3992 else { 3992 quick_drop = eep->modalHeader2G.quick_drop;
3993 t[0] = eep->base_ext1.quick_drop_low; 3993 } else {
3994 t[1] = eep->modalHeader5G.quick_drop; 3994 t[0] = eep->base_ext1.quick_drop_low;
3995 t[2] = eep->base_ext1.quick_drop_high; 3995 t[1] = eep->modalHeader5G.quick_drop;
3996 quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3); 3996 t[2] = eep->base_ext1.quick_drop_high;
3997 quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3);
3998 }
3999 REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
3997 } 4000 }
3998 REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
3999} 4001}
4000 4002
4001static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, bool is2ghz) 4003static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, bool is2ghz)
@@ -4035,7 +4037,7 @@ static void ar9003_hw_xlna_bias_strength_apply(struct ath_hw *ah, bool is2ghz)
4035 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; 4037 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
4036 u8 bias; 4038 u8 bias;
4037 4039
4038 if (!(eep->baseEepHeader.featureEnable & 0x40)) 4040 if (!(eep->baseEepHeader.miscConfiguration & 0x40))
4039 return; 4041 return;
4040 4042
4041 if (!AR_SREV_9300(ah)) 4043 if (!AR_SREV_9300(ah))
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index b07f164d65cf..20e49095db2a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -187,17 +187,17 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
187 INIT_INI_ARRAY(&ah->iniCckfirJapan2484, 187 INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
188 ar9485_1_1_baseband_core_txfir_coeff_japan_2484); 188 ar9485_1_1_baseband_core_txfir_coeff_japan_2484);
189 189
190 /* Load PCIE SERDES settings from INI */ 190 if (ah->config.no_pll_pwrsave) {
191 191 INIT_INI_ARRAY(&ah->iniPcieSerdes,
192 /* Awake Setting */ 192 ar9485_1_1_pcie_phy_clkreq_disable_L1);
193 193 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
194 INIT_INI_ARRAY(&ah->iniPcieSerdes, 194 ar9485_1_1_pcie_phy_clkreq_disable_L1);
195 ar9485_1_1_pcie_phy_clkreq_disable_L1); 195 } else {
196 196 INIT_INI_ARRAY(&ah->iniPcieSerdes,
197 /* Sleep Setting */ 197 ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
198 198 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
199 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 199 ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
200 ar9485_1_1_pcie_phy_clkreq_disable_L1); 200 }
201 } else if (AR_SREV_9462_21(ah)) { 201 } else if (AR_SREV_9462_21(ah)) {
202 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], 202 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
203 ar9462_2p1_mac_core); 203 ar9462_2p1_mac_core);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 11f53589a3f3..d39b79f5e841 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -701,6 +701,54 @@ static int ar9550_hw_get_modes_txgain_index(struct ath_hw *ah,
701 return ret; 701 return ret;
702} 702}
703 703
704static void ar9003_doubler_fix(struct ath_hw *ah)
705{
706 if (AR_SREV_9300(ah) || AR_SREV_9580(ah) || AR_SREV_9550(ah)) {
707 REG_RMW(ah, AR_PHY_65NM_CH0_RXTX2,
708 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S |
709 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S, 0);
710 REG_RMW(ah, AR_PHY_65NM_CH1_RXTX2,
711 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S |
712 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S, 0);
713 REG_RMW(ah, AR_PHY_65NM_CH2_RXTX2,
714 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S |
715 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S, 0);
716
717 udelay(200);
718
719 REG_CLR_BIT(ah, AR_PHY_65NM_CH0_RXTX2,
720 AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK);
721 REG_CLR_BIT(ah, AR_PHY_65NM_CH1_RXTX2,
722 AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK);
723 REG_CLR_BIT(ah, AR_PHY_65NM_CH2_RXTX2,
724 AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK);
725
726 udelay(1);
727
728 REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX2,
729 AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK, 1);
730 REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX2,
731 AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK, 1);
732 REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX2,
733 AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK, 1);
734
735 udelay(200);
736
737 REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_SYNTH12,
738 AR_PHY_65NM_CH0_SYNTH12_VREFMUL3, 0xf);
739
740 REG_RMW(ah, AR_PHY_65NM_CH0_RXTX2, 0,
741 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S |
742 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S);
743 REG_RMW(ah, AR_PHY_65NM_CH1_RXTX2, 0,
744 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S |
745 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S);
746 REG_RMW(ah, AR_PHY_65NM_CH2_RXTX2, 0,
747 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S |
748 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S);
749 }
750}
751
704static int ar9003_hw_process_ini(struct ath_hw *ah, 752static int ar9003_hw_process_ini(struct ath_hw *ah,
705 struct ath9k_channel *chan) 753 struct ath9k_channel *chan)
706{ 754{
@@ -726,6 +774,8 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
726 modesIndex); 774 modesIndex);
727 } 775 }
728 776
777 ar9003_doubler_fix(ah);
778
729 /* 779 /*
730 * RXGAIN initvals. 780 * RXGAIN initvals.
731 */ 781 */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index fca624322dc8..2af667beb273 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -656,13 +656,24 @@
656#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x00000001 : 0x00000002) 656#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x00000001 : 0x00000002)
657#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0 : 1) 657#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0 : 1)
658#define AR_PHY_65NM_CH0_SYNTH7 0x16098 658#define AR_PHY_65NM_CH0_SYNTH7 0x16098
659#define AR_PHY_65NM_CH0_SYNTH12 0x160ac
659#define AR_PHY_65NM_CH0_BIAS1 0x160c0 660#define AR_PHY_65NM_CH0_BIAS1 0x160c0
660#define AR_PHY_65NM_CH0_BIAS2 0x160c4 661#define AR_PHY_65NM_CH0_BIAS2 0x160c4
661#define AR_PHY_65NM_CH0_BIAS4 0x160cc 662#define AR_PHY_65NM_CH0_BIAS4 0x160cc
663#define AR_PHY_65NM_CH0_RXTX2 0x16104
664#define AR_PHY_65NM_CH1_RXTX2 0x16504
665#define AR_PHY_65NM_CH2_RXTX2 0x16904
662#define AR_PHY_65NM_CH0_RXTX4 0x1610c 666#define AR_PHY_65NM_CH0_RXTX4 0x1610c
663#define AR_PHY_65NM_CH1_RXTX4 0x1650c 667#define AR_PHY_65NM_CH1_RXTX4 0x1650c
664#define AR_PHY_65NM_CH2_RXTX4 0x1690c 668#define AR_PHY_65NM_CH2_RXTX4 0x1690c
665 669
670#define AR_PHY_65NM_CH0_SYNTH12_VREFMUL3 0x00780000
671#define AR_PHY_65NM_CH0_SYNTH12_VREFMUL3_S 19
672#define AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK 0x00000004
673#define AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S 2
674#define AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK 0x00000008
675#define AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S 3
676
666#define AR_CH0_TOP (AR_SREV_9300(ah) ? 0x16288 : \ 677#define AR_CH0_TOP (AR_SREV_9300(ah) ? 0x16288 : \
667 (((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x1628c : 0x16280))) 678 (((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x1628c : 0x16280)))
668#define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300) 679#define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300)
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
index 4dbc294df7e3..57fc5f459d0a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
@@ -361,7 +361,7 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = {
361 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e}, 361 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
362 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 362 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
363 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 363 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
364 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 364 {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
365 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 365 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
366 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282}, 366 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
367 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, 367 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
@@ -400,7 +400,7 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = {
400 {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000}, 400 {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
401 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 401 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
402 {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, 402 {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
403 {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce}, 403 {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa},
404 {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550}, 404 {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
405}; 405};
406 406
@@ -472,7 +472,7 @@ static const u32 ar9462_2p1_radio_postamble[][5] = {
472 472
473static const u32 ar9462_2p1_soc_preamble[][2] = { 473static const u32 ar9462_2p1_soc_preamble[][2] = {
474 /* Addr allmodes */ 474 /* Addr allmodes */
475 {0x000040a4, 0x00a0c1c9}, 475 {0x000040a4, 0x00a0c9c9},
476 {0x00007020, 0x00000000}, 476 {0x00007020, 0x00000000},
477 {0x00007034, 0x00000002}, 477 {0x00007034, 0x00000002},
478 {0x00007038, 0x000004c2}, 478 {0x00007038, 0x000004c2},
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index 6f899c692647..7c1845221e1c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -32,13 +32,6 @@ static const u32 ar9485_1_1_mac_postamble[][5] = {
32 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440}, 32 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
33}; 33};
34 34
35static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
36 /* Addr allmodes */
37 {0x00018c00, 0x18012e5e},
38 {0x00018c04, 0x000801d8},
39 {0x00018c08, 0x0000080c},
40};
41
42static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = { 35static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
43 /* Addr allmodes */ 36 /* Addr allmodes */
44 {0x00009e00, 0x037216a0}, 37 {0x00009e00, 0x037216a0},
@@ -1101,20 +1094,6 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = {
1101 {0x0000a1fc, 0x00000296}, 1094 {0x0000a1fc, 0x00000296},
1102}; 1095};
1103 1096
1104static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
1105 /* Addr allmodes */
1106 {0x00018c00, 0x18052e5e},
1107 {0x00018c04, 0x000801d8},
1108 {0x00018c08, 0x0000080c},
1109};
1110
1111static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = {
1112 /* Addr allmodes */
1113 {0x00018c00, 0x18053e5e},
1114 {0x00018c04, 0x000801d8},
1115 {0x00018c08, 0x0000080c},
1116};
1117
1118static const u32 ar9485_1_1_soc_preamble[][2] = { 1097static const u32 ar9485_1_1_soc_preamble[][2] = {
1119 /* Addr allmodes */ 1098 /* Addr allmodes */
1120 {0x00004014, 0xba280400}, 1099 {0x00004014, 0xba280400},
@@ -1173,13 +1152,6 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
1173 {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1152 {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1174}; 1153};
1175 1154
1176static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
1177 /* Addr allmodes */
1178 {0x00018c00, 0x18013e5e},
1179 {0x00018c04, 0x000801d8},
1180 {0x00018c08, 0x0000080c},
1181};
1182
1183static const u32 ar9485_1_1_radio_postamble[][2] = { 1155static const u32 ar9485_1_1_radio_postamble[][2] = {
1184 /* Addr allmodes */ 1156 /* Addr allmodes */
1185 {0x0001609c, 0x0b283f31}, 1157 {0x0001609c, 0x0b283f31},
@@ -1358,4 +1330,18 @@ static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
1358 {0x0000a3a0, 0xca9228ee}, 1330 {0x0000a3a0, 0xca9228ee},
1359}; 1331};
1360 1332
1333static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
1334 /* Addr allmodes */
1335 {0x00018c00, 0x18013e5e},
1336 {0x00018c04, 0x000801d8},
1337 {0x00018c08, 0x0000080c},
1338};
1339
1340static const u32 ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1[][2] = {
1341 /* Addr allmodes */
1342 {0x00018c00, 0x1801265e},
1343 {0x00018c04, 0x000801d8},
1344 {0x00018c08, 0x0000080c},
1345};
1346
1361#endif /* INITVALS_9485_H */ 1347#endif /* INITVALS_9485_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index e7a38d844a6a..60a5da53668f 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -632,15 +632,16 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
632/* Main driver core */ 632/* Main driver core */
633/********************/ 633/********************/
634 634
635#define ATH9K_PCI_CUS198 0x0001 635#define ATH9K_PCI_CUS198 0x0001
636#define ATH9K_PCI_CUS230 0x0002 636#define ATH9K_PCI_CUS230 0x0002
637#define ATH9K_PCI_CUS217 0x0004 637#define ATH9K_PCI_CUS217 0x0004
638#define ATH9K_PCI_CUS252 0x0008 638#define ATH9K_PCI_CUS252 0x0008
639#define ATH9K_PCI_WOW 0x0010 639#define ATH9K_PCI_WOW 0x0010
640#define ATH9K_PCI_BT_ANT_DIV 0x0020 640#define ATH9K_PCI_BT_ANT_DIV 0x0020
641#define ATH9K_PCI_D3_L1_WAR 0x0040 641#define ATH9K_PCI_D3_L1_WAR 0x0040
642#define ATH9K_PCI_AR9565_1ANT 0x0080 642#define ATH9K_PCI_AR9565_1ANT 0x0080
643#define ATH9K_PCI_AR9565_2ANT 0x0100 643#define ATH9K_PCI_AR9565_2ANT 0x0100
644#define ATH9K_PCI_NO_PLL_PWRSAVE 0x0200
644 645
645/* 646/*
646 * Default cache line size, in bytes. 647 * Default cache line size, in bytes.
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
index 90b8342d1ed4..8824610c21fb 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -44,14 +44,20 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
44 if (buf == NULL) 44 if (buf == NULL)
45 return -ENOMEM; 45 return -ENOMEM;
46 46
47 if (sc->dfs_detector)
48 dfs_pool_stats = sc->dfs_detector->get_stats(sc->dfs_detector);
49
50 len += scnprintf(buf + len, size - len, "DFS support for " 47 len += scnprintf(buf + len, size - len, "DFS support for "
51 "macVersion = 0x%x, macRev = 0x%x: %s\n", 48 "macVersion = 0x%x, macRev = 0x%x: %s\n",
52 hw_ver->macVersion, hw_ver->macRev, 49 hw_ver->macVersion, hw_ver->macRev,
53 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ? 50 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
54 "enabled" : "disabled"); 51 "enabled" : "disabled");
52
53 if (!sc->dfs_detector) {
54 len += scnprintf(buf + len, size - len,
55 "DFS detector not enabled\n");
56 goto exit;
57 }
58
59 dfs_pool_stats = sc->dfs_detector->get_stats(sc->dfs_detector);
60
55 len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n"); 61 len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n");
56 ATH9K_DFS_STAT("pulse events reported ", pulses_total); 62 ATH9K_DFS_STAT("pulse events reported ", pulses_total);
57 ATH9K_DFS_STAT("invalid pulse events ", pulses_no_dfs); 63 ATH9K_DFS_STAT("invalid pulse events ", pulses_no_dfs);
@@ -76,6 +82,7 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
76 ATH9K_DFS_POOL_STAT("Seqs. alloc error ", pseq_alloc_error); 82 ATH9K_DFS_POOL_STAT("Seqs. alloc error ", pseq_alloc_error);
77 ATH9K_DFS_POOL_STAT("Seqs. in use ", pseq_used); 83 ATH9K_DFS_POOL_STAT("Seqs. in use ", pseq_used);
78 84
85exit:
79 if (len > size) 86 if (len > size)
80 len = size; 87 len = size;
81 88
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 54b04155e43b..8918035da3a3 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -146,10 +146,9 @@ static void ath9k_hw_set_clockrate(struct ath_hw *ah)
146 else 146 else
147 clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM; 147 clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM;
148 148
149 if (IS_CHAN_HT40(chan)) 149 if (chan) {
150 clockrate *= 2; 150 if (IS_CHAN_HT40(chan))
151 151 clockrate *= 2;
152 if (ah->curchan) {
153 if (IS_CHAN_HALF_RATE(chan)) 152 if (IS_CHAN_HALF_RATE(chan))
154 clockrate /= 2; 153 clockrate /= 2;
155 if (IS_CHAN_QUARTER_RATE(chan)) 154 if (IS_CHAN_QUARTER_RATE(chan))
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 9ea24f1cba73..a2c9a5dbac6b 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -316,6 +316,7 @@ struct ath9k_ops_config {
316 u32 ant_ctrl_comm2g_switch_enable; 316 u32 ant_ctrl_comm2g_switch_enable;
317 bool xatten_margin_cfg; 317 bool xatten_margin_cfg;
318 bool alt_mingainidx; 318 bool alt_mingainidx;
319 bool no_pll_pwrsave;
319}; 320};
320 321
321enum ath9k_int { 322enum ath9k_int {
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index d8643ebabd30..710192ed27ed 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -609,6 +609,11 @@ static void ath9k_init_platform(struct ath_softc *sc)
609 ah->config.pcie_waen = 0x0040473b; 609 ah->config.pcie_waen = 0x0040473b;
610 ath_info(common, "Enable WAR for ASPM D3/L1\n"); 610 ath_info(common, "Enable WAR for ASPM D3/L1\n");
611 } 611 }
612
613 if (sc->driver_data & ATH9K_PCI_NO_PLL_PWRSAVE) {
614 ah->config.no_pll_pwrsave = true;
615 ath_info(common, "Disable PLL PowerSave\n");
616 }
612} 617}
613 618
614static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob, 619static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
@@ -863,8 +868,8 @@ static const struct ieee80211_iface_combination if_comb[] = {
863 .max_interfaces = 1, 868 .max_interfaces = 1,
864 .num_different_channels = 1, 869 .num_different_channels = 1,
865 .beacon_int_infra_match = true, 870 .beacon_int_infra_match = true,
866 .radar_detect_widths = BIT(NL80211_CHAN_NO_HT) | 871 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
867 BIT(NL80211_CHAN_HT20), 872 BIT(NL80211_CHAN_WIDTH_20),
868 } 873 }
869}; 874};
870 875
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 7e4c2524b630..b5656fce4ff5 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -195,6 +195,93 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
195 0x3219), 195 0x3219),
196 .driver_data = ATH9K_PCI_BT_ANT_DIV }, 196 .driver_data = ATH9K_PCI_BT_ANT_DIV },
197 197
198 /* AR9485 cards with PLL power-save disabled by default. */
199 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
200 0x0032,
201 PCI_VENDOR_ID_AZWAVE,
202 0x2C97),
203 .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
204 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
205 0x0032,
206 PCI_VENDOR_ID_AZWAVE,
207 0x2100),
208 .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
209 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
210 0x0032,
211 0x1C56, /* ASKEY */
212 0x4001),
213 .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
214 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
215 0x0032,
216 0x11AD, /* LITEON */
217 0x6627),
218 .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
219 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
220 0x0032,
221 0x11AD, /* LITEON */
222 0x6628),
223 .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
224 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
225 0x0032,
226 PCI_VENDOR_ID_FOXCONN,
227 0xE04E),
228 .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
229 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
230 0x0032,
231 PCI_VENDOR_ID_FOXCONN,
232 0xE04F),
233 .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
234 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
235 0x0032,
236 0x144F, /* ASKEY */
237 0x7197),
238 .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
239 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
240 0x0032,
241 0x1B9A, /* XAVI */
242 0x2000),
243 .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
244 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
245 0x0032,
246 0x1B9A, /* XAVI */
247 0x2001),
248 .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
249 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
250 0x0032,
251 PCI_VENDOR_ID_AZWAVE,
252 0x1186),
253 .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
254 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
255 0x0032,
256 PCI_VENDOR_ID_AZWAVE,
257 0x1F86),
258 .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
259 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
260 0x0032,
261 PCI_VENDOR_ID_AZWAVE,
262 0x1195),
263 .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
264 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
265 0x0032,
266 PCI_VENDOR_ID_AZWAVE,
267 0x1F95),
268 .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
269 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
270 0x0032,
271 0x1B9A, /* XAVI */
272 0x1C00),
273 .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
274 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
275 0x0032,
276 0x1B9A, /* XAVI */
277 0x1C01),
278 .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
279 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
280 0x0032,
281 PCI_VENDOR_ID_ASUSTEK,
282 0x850D),
283 .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
284
198 { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ 285 { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
199 { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ 286 { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
200 287
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 09cdbcd09739..b5a19e098f2d 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1276,6 +1276,10 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
1276 if (!rts_thresh || (len > rts_thresh)) 1276 if (!rts_thresh || (len > rts_thresh))
1277 rts = true; 1277 rts = true;
1278 } 1278 }
1279
1280 if (!aggr)
1281 len = fi->framelen;
1282
1279 ath_buf_set_rate(sc, bf, &info, len, rts); 1283 ath_buf_set_rate(sc, bf, &info, len, rts);
1280 } 1284 }
1281 1285
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index c00687e05688..1217c52ab28e 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -362,7 +362,8 @@ static int __ath_reg_dyn_country(struct wiphy *wiphy,
362{ 362{
363 u16 country_code; 363 u16 country_code;
364 364
365 if (!ath_is_world_regd(reg)) 365 if (request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
366 !ath_is_world_regd(reg))
366 return -EINVAL; 367 return -EINVAL;
367 368
368 country_code = ath_regd_find_country_by_name(request->alpha2); 369 country_code = ath_regd_find_country_by_name(request->alpha2);
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c
index 5b84f7ae0b1e..ef44a2da644d 100644
--- a/drivers/net/wireless/ath/wcn36xx/debug.c
+++ b/drivers/net/wireless/ath/wcn36xx/debug.c
@@ -126,7 +126,7 @@ static ssize_t write_file_dump(struct file *file,
126 if (begin == NULL) 126 if (begin == NULL)
127 break; 127 break;
128 128
129 if (kstrtoul(begin, 0, (unsigned long *)(arg + i)) != 0) 129 if (kstrtou32(begin, 0, &arg[i]) != 0)
130 break; 130 break;
131 } 131 }
132 132
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index f8c3a10510c2..366339421d4f 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -1286,7 +1286,8 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
1286 } else { 1286 } else {
1287 wcn36xx_err("Beacon is to big: beacon size=%d\n", 1287 wcn36xx_err("Beacon is to big: beacon size=%d\n",
1288 msg_body.beacon_length); 1288 msg_body.beacon_length);
1289 return -ENOMEM; 1289 ret = -ENOMEM;
1290 goto out;
1290 } 1291 }
1291 memcpy(msg_body.bssid, vif->addr, ETH_ALEN); 1292 memcpy(msg_body.bssid, vif->addr, ETH_ALEN);
1292 1293
@@ -1327,7 +1328,8 @@ int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
1327 if (skb->len > BEACON_TEMPLATE_SIZE) { 1328 if (skb->len > BEACON_TEMPLATE_SIZE) {
1328 wcn36xx_warn("probe response template is too big: %d\n", 1329 wcn36xx_warn("probe response template is too big: %d\n",
1329 skb->len); 1330 skb->len);
1330 return -E2BIG; 1331 ret = -E2BIG;
1332 goto out;
1331 } 1333 }
1332 1334
1333 msg.probe_resp_template_len = skb->len; 1335 msg.probe_resp_template_len = skb->len;
@@ -1606,7 +1608,8 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
1606 /* TODO: it also support ARP response type */ 1608 /* TODO: it also support ARP response type */
1607 } else { 1609 } else {
1608 wcn36xx_warn("unknow keep alive packet type %d\n", packet_type); 1610 wcn36xx_warn("unknow keep alive packet type %d\n", packet_type);
1609 return -EINVAL; 1611 ret = -EINVAL;
1612 goto out;
1610 } 1613 }
1611 1614
1612 PREPARE_HAL_BUF(wcn->hal_buf, msg_body); 1615 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -2038,13 +2041,20 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
2038 case WCN36XX_HAL_DELETE_STA_CONTEXT_IND: 2041 case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
2039 mutex_lock(&wcn->hal_ind_mutex); 2042 mutex_lock(&wcn->hal_ind_mutex);
2040 msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL); 2043 msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL);
2041 msg_ind->msg_len = len; 2044 if (msg_ind) {
2042 msg_ind->msg = kmalloc(len, GFP_KERNEL); 2045 msg_ind->msg_len = len;
2043 memcpy(msg_ind->msg, buf, len); 2046 msg_ind->msg = kmalloc(len, GFP_KERNEL);
2044 list_add_tail(&msg_ind->list, &wcn->hal_ind_queue); 2047 memcpy(msg_ind->msg, buf, len);
2045 queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work); 2048 list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
2046 wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n"); 2049 queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
2050 wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
2051 }
2047 mutex_unlock(&wcn->hal_ind_mutex); 2052 mutex_unlock(&wcn->hal_ind_mutex);
2053 if (msg_ind)
2054 break;
2055 /* FIXME: Do something smarter then just printing an error. */
2056 wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
2057 msg_header->msg_type);
2048 break; 2058 break;
2049 default: 2059 default:
2050 wcn36xx_err("SMD_EVENT (%d) not supported\n", 2060 wcn36xx_err("SMD_EVENT (%d) not supported\n",
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index b00a7e92225f..54e36fcb3954 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -5,6 +5,8 @@ config BRCMSMAC
5 tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver" 5 tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver"
6 depends on MAC80211 6 depends on MAC80211
7 depends on BCMA 7 depends on BCMA
8 select NEW_LEDS if BCMA_DRIVER_GPIO
9 select LEDS_CLASS if BCMA_DRIVER_GPIO
8 select BRCMUTIL 10 select BRCMUTIL
9 select FW_LOADER 11 select FW_LOADER
10 select CRC_CCITT 12 select CRC_CCITT
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 905704e335d7..abc9ceca70f3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -109,6 +109,8 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
109 brcmf_err("Disable F2 failed:%d\n", 109 brcmf_err("Disable F2 failed:%d\n",
110 err_ret); 110 err_ret);
111 } 111 }
112 } else {
113 err_ret = -ENOENT;
112 } 114 }
113 } else if ((regaddr == SDIO_CCCR_ABORT) || 115 } else if ((regaddr == SDIO_CCCR_ABORT) ||
114 (regaddr == SDIO_CCCR_IENx)) { 116 (regaddr == SDIO_CCCR_IENx)) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index 5b5b952d47b1..4a2293041821 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -823,6 +823,7 @@ static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg,
823 } 823 }
824 err = brcmf_p2p_escan(p2p, num_nodfs, chanspecs, search_state, 824 err = brcmf_p2p_escan(p2p, num_nodfs, chanspecs, search_state,
825 action, P2PAPI_BSSCFG_DEVICE); 825 action, P2PAPI_BSSCFG_DEVICE);
826 kfree(chanspecs);
826 } 827 }
827exit: 828exit:
828 if (err) 829 if (err)
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 85879dbaa402..3c34a72a5d64 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -67,8 +67,8 @@
67#include "iwl-agn-hw.h" 67#include "iwl-agn-hw.h"
68 68
69/* Highest firmware API version supported */ 69/* Highest firmware API version supported */
70#define IWL7260_UCODE_API_MAX 7 70#define IWL7260_UCODE_API_MAX 8
71#define IWL3160_UCODE_API_MAX 7 71#define IWL3160_UCODE_API_MAX 8
72 72
73/* Oldest version we won't warn about */ 73/* Oldest version we won't warn about */
74#define IWL7260_UCODE_API_OK 7 74#define IWL7260_UCODE_API_OK 7
@@ -130,6 +130,7 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
130 .ht_params = &iwl7000_ht_params, 130 .ht_params = &iwl7000_ht_params,
131 .nvm_ver = IWL7260_NVM_VERSION, 131 .nvm_ver = IWL7260_NVM_VERSION,
132 .nvm_calib_ver = IWL7260_TX_POWER_VERSION, 132 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
133 .host_interrupt_operation_mode = true,
133}; 134};
134 135
135const struct iwl_cfg iwl7260_2ac_cfg_high_temp = { 136const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
@@ -140,6 +141,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
140 .nvm_ver = IWL7260_NVM_VERSION, 141 .nvm_ver = IWL7260_NVM_VERSION,
141 .nvm_calib_ver = IWL7260_TX_POWER_VERSION, 142 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
142 .high_temp = true, 143 .high_temp = true,
144 .host_interrupt_operation_mode = true,
143}; 145};
144 146
145const struct iwl_cfg iwl7260_2n_cfg = { 147const struct iwl_cfg iwl7260_2n_cfg = {
@@ -149,6 +151,7 @@ const struct iwl_cfg iwl7260_2n_cfg = {
149 .ht_params = &iwl7000_ht_params, 151 .ht_params = &iwl7000_ht_params,
150 .nvm_ver = IWL7260_NVM_VERSION, 152 .nvm_ver = IWL7260_NVM_VERSION,
151 .nvm_calib_ver = IWL7260_TX_POWER_VERSION, 153 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
154 .host_interrupt_operation_mode = true,
152}; 155};
153 156
154const struct iwl_cfg iwl7260_n_cfg = { 157const struct iwl_cfg iwl7260_n_cfg = {
@@ -158,6 +161,7 @@ const struct iwl_cfg iwl7260_n_cfg = {
158 .ht_params = &iwl7000_ht_params, 161 .ht_params = &iwl7000_ht_params,
159 .nvm_ver = IWL7260_NVM_VERSION, 162 .nvm_ver = IWL7260_NVM_VERSION,
160 .nvm_calib_ver = IWL7260_TX_POWER_VERSION, 163 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
164 .host_interrupt_operation_mode = true,
161}; 165};
162 166
163const struct iwl_cfg iwl3160_2ac_cfg = { 167const struct iwl_cfg iwl3160_2ac_cfg = {
@@ -167,6 +171,7 @@ const struct iwl_cfg iwl3160_2ac_cfg = {
167 .ht_params = &iwl7000_ht_params, 171 .ht_params = &iwl7000_ht_params,
168 .nvm_ver = IWL3160_NVM_VERSION, 172 .nvm_ver = IWL3160_NVM_VERSION,
169 .nvm_calib_ver = IWL3160_TX_POWER_VERSION, 173 .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
174 .host_interrupt_operation_mode = true,
170}; 175};
171 176
172const struct iwl_cfg iwl3160_2n_cfg = { 177const struct iwl_cfg iwl3160_2n_cfg = {
@@ -176,6 +181,7 @@ const struct iwl_cfg iwl3160_2n_cfg = {
176 .ht_params = &iwl7000_ht_params, 181 .ht_params = &iwl7000_ht_params,
177 .nvm_ver = IWL3160_NVM_VERSION, 182 .nvm_ver = IWL3160_NVM_VERSION,
178 .nvm_calib_ver = IWL3160_TX_POWER_VERSION, 183 .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
184 .host_interrupt_operation_mode = true,
179}; 185};
180 186
181const struct iwl_cfg iwl3160_n_cfg = { 187const struct iwl_cfg iwl3160_n_cfg = {
@@ -185,6 +191,7 @@ const struct iwl_cfg iwl3160_n_cfg = {
185 .ht_params = &iwl7000_ht_params, 191 .ht_params = &iwl7000_ht_params,
186 .nvm_ver = IWL3160_NVM_VERSION, 192 .nvm_ver = IWL3160_NVM_VERSION,
187 .nvm_calib_ver = IWL3160_TX_POWER_VERSION, 193 .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
194 .host_interrupt_operation_mode = true,
188}; 195};
189 196
190const struct iwl_cfg iwl7265_2ac_cfg = { 197const struct iwl_cfg iwl7265_2ac_cfg = {
@@ -196,5 +203,23 @@ const struct iwl_cfg iwl7265_2ac_cfg = {
196 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, 203 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
197}; 204};
198 205
206const struct iwl_cfg iwl7265_2n_cfg = {
207 .name = "Intel(R) Dual Band Wireless N 7265",
208 .fw_name_pre = IWL7265_FW_PRE,
209 IWL_DEVICE_7000,
210 .ht_params = &iwl7000_ht_params,
211 .nvm_ver = IWL7265_NVM_VERSION,
212 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
213};
214
215const struct iwl_cfg iwl7265_n_cfg = {
216 .name = "Intel(R) Wireless N 7265",
217 .fw_name_pre = IWL7265_FW_PRE,
218 IWL_DEVICE_7000,
219 .ht_params = &iwl7000_ht_params,
220 .nvm_ver = IWL7265_NVM_VERSION,
221 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
222};
223
199MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 224MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
200MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); 225MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 18f232e8e812..03fd9aa8bfda 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -207,6 +207,8 @@ struct iwl_eeprom_params {
207 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity 207 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
208 * @internal_wimax_coex: internal wifi/wimax combo device 208 * @internal_wimax_coex: internal wifi/wimax combo device
209 * @high_temp: Is this NIC is designated to be in high temperature. 209 * @high_temp: Is this NIC is designated to be in high temperature.
210 * @host_interrupt_operation_mode: device needs host interrupt operation
211 * mode set
210 * 212 *
211 * We enable the driver to be backward compatible wrt. hardware features. 213 * We enable the driver to be backward compatible wrt. hardware features.
212 * API differences in uCode shouldn't be handled here but through TLVs 214 * API differences in uCode shouldn't be handled here but through TLVs
@@ -235,6 +237,7 @@ struct iwl_cfg {
235 enum iwl_led_mode led_mode; 237 enum iwl_led_mode led_mode;
236 const bool rx_with_siso_diversity; 238 const bool rx_with_siso_diversity;
237 const bool internal_wimax_coex; 239 const bool internal_wimax_coex;
240 const bool host_interrupt_operation_mode;
238 bool high_temp; 241 bool high_temp;
239}; 242};
240 243
@@ -294,6 +297,8 @@ extern const struct iwl_cfg iwl3160_2ac_cfg;
294extern const struct iwl_cfg iwl3160_2n_cfg; 297extern const struct iwl_cfg iwl3160_2n_cfg;
295extern const struct iwl_cfg iwl3160_n_cfg; 298extern const struct iwl_cfg iwl3160_n_cfg;
296extern const struct iwl_cfg iwl7265_2ac_cfg; 299extern const struct iwl_cfg iwl7265_2ac_cfg;
300extern const struct iwl_cfg iwl7265_2n_cfg;
301extern const struct iwl_cfg iwl7265_n_cfg;
297#endif /* CONFIG_IWLMVM */ 302#endif /* CONFIG_IWLMVM */
298 303
299#endif /* __IWL_CONFIG_H__ */ 304#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 54a4fdc631b7..da4eca8b3007 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -495,14 +495,11 @@ enum secure_load_status_reg {
495 * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit 495 * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
496 * 496 *
497 * default interrupt coalescing timer is 64 x 32 = 2048 usecs 497 * default interrupt coalescing timer is 64 x 32 = 2048 usecs
498 * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
499 */ 498 */
500#define IWL_HOST_INT_TIMEOUT_MAX (0xFF) 499#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
501#define IWL_HOST_INT_TIMEOUT_DEF (0x40) 500#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
502#define IWL_HOST_INT_TIMEOUT_MIN (0x0) 501#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
503#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF) 502#define IWL_HOST_INT_OPER_MODE BIT(31)
504#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
505#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
506 503
507/***************************************************************************** 504/*****************************************************************************
508 * 7000/3000 series SHR DTS addresses * 505 * 7000/3000 series SHR DTS addresses *
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index 5d066cbc5ac7..75b72a956552 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -391,7 +391,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
391 BT_VALID_LUT | 391 BT_VALID_LUT |
392 BT_VALID_WIFI_RX_SW_PRIO_BOOST | 392 BT_VALID_WIFI_RX_SW_PRIO_BOOST |
393 BT_VALID_WIFI_TX_SW_PRIO_BOOST | 393 BT_VALID_WIFI_TX_SW_PRIO_BOOST |
394 BT_VALID_MULTI_PRIO_LUT |
395 BT_VALID_CORUN_LUT_20 | 394 BT_VALID_CORUN_LUT_20 |
396 BT_VALID_CORUN_LUT_40 | 395 BT_VALID_CORUN_LUT_40 |
397 BT_VALID_ANT_ISOLATION | 396 BT_VALID_ANT_ISOLATION |
@@ -842,6 +841,11 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
842 841
843 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], 842 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
844 lockdep_is_held(&mvm->mutex)); 843 lockdep_is_held(&mvm->mutex));
844
845 /* This can happen if the station has been removed right now */
846 if (IS_ERR_OR_NULL(sta))
847 return;
848
845 mvmsta = (void *)sta->drv_priv; 849 mvmsta = (void *)sta->drv_priv;
846 850
847 data->num_bss_ifaces++; 851 data->num_bss_ifaces++;
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 6f45966817bb..b9b81e881dd0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -895,7 +895,7 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
895 /* new API returns next, not last-used seqno */ 895 /* new API returns next, not last-used seqno */
896 if (mvm->fw->ucode_capa.flags & 896 if (mvm->fw->ucode_capa.flags &
897 IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) 897 IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
898 err -= 0x10; 898 err = (u16) (err - 0x10);
899 } 899 }
900 900
901 iwl_free_resp(&cmd); 901 iwl_free_resp(&cmd);
@@ -1549,7 +1549,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
1549 if (gtkdata.unhandled_cipher) 1549 if (gtkdata.unhandled_cipher)
1550 return false; 1550 return false;
1551 if (!gtkdata.num_keys) 1551 if (!gtkdata.num_keys)
1552 return true; 1552 goto out;
1553 if (!gtkdata.last_gtk) 1553 if (!gtkdata.last_gtk)
1554 return false; 1554 return false;
1555 1555
@@ -1600,6 +1600,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
1600 (void *)&replay_ctr, GFP_KERNEL); 1600 (void *)&replay_ctr, GFP_KERNEL);
1601 } 1601 }
1602 1602
1603out:
1603 mvmvif->seqno_valid = true; 1604 mvmvif->seqno_valid = true;
1604 /* +0x10 because the set API expects next-to-use, not last-used */ 1605 /* +0x10 because the set API expects next-to-use, not last-used */
1605 mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10; 1606 mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 9864d713eb2c..a8fe6b41f9a3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -119,6 +119,10 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct file *file,
119 119
120 if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) 120 if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
121 return -EINVAL; 121 return -EINVAL;
122 if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT)
123 return -EINVAL;
124 if (drain < 0 || drain > 1)
125 return -EINVAL;
122 126
123 mutex_lock(&mvm->mutex); 127 mutex_lock(&mvm->mutex);
124 128
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 33cf56fdfc41..95ce4b601fef 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -176,8 +176,11 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
176 * P2P Device discoveribility, while there are other higher priority 176 * P2P Device discoveribility, while there are other higher priority
177 * events in the system). 177 * events in the system).
178 */ 178 */
179 if (WARN_ONCE(!le32_to_cpu(notif->status), 179 if (!le32_to_cpu(notif->status)) {
180 "Failed to schedule time event\n")) { 180 bool start = le32_to_cpu(notif->action) &
181 TE_V2_NOTIF_HOST_EVENT_START;
182 IWL_WARN(mvm, "Time Event %s notification failure\n",
183 start ? "start" : "end");
181 if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) { 184 if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) {
182 iwl_mvm_te_clear_data(mvm, te_data); 185 iwl_mvm_te_clear_data(mvm, te_data);
183 return; 186 return;
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 941c0c88f982..86605027c41d 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -353,6 +353,27 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
353 353
354/* 7265 Series */ 354/* 7265 Series */
355 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, 355 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
356 {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
357 {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
358 {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
359 {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
360 {IWL_PCI_DEVICE(0x095B, 0x5012, iwl7265_2ac_cfg)},
361 {IWL_PCI_DEVICE(0x095B, 0x500A, iwl7265_2ac_cfg)},
362 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
363 {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
364 {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
365 {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
366 {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
367 {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
368 {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
369 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
371 {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
372 {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
373 {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
374 {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
375 {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
376 {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
356#endif /* CONFIG_IWLMVM */ 377#endif /* CONFIG_IWLMVM */
357 378
358 {0} 379 {0}
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index fa22639b63c9..051268c037b1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -477,4 +477,12 @@ static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
477 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); 477 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
478} 478}
479 479
480static inline void iwl_nic_error(struct iwl_trans *trans)
481{
482 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
483
484 set_bit(STATUS_FW_ERROR, &trans_pcie->status);
485 iwl_op_mode_nic_error(trans->op_mode);
486}
487
480#endif /* __iwl_trans_int_pcie_h__ */ 488#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 3f237b42eb36..be3995afa9d0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -489,6 +489,10 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
489 489
490 /* Set interrupt coalescing timer to default (2048 usecs) */ 490 /* Set interrupt coalescing timer to default (2048 usecs) */
491 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); 491 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
492
493 /* W/A for interrupt coalescing bug in 7260 and 3160 */
494 if (trans->cfg->host_interrupt_operation_mode)
495 iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
492} 496}
493 497
494static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) 498static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
@@ -796,12 +800,13 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
796 iwl_pcie_dump_csr(trans); 800 iwl_pcie_dump_csr(trans);
797 iwl_dump_fh(trans, NULL); 801 iwl_dump_fh(trans, NULL);
798 802
803 /* set the ERROR bit before we wake up the caller */
799 set_bit(STATUS_FW_ERROR, &trans_pcie->status); 804 set_bit(STATUS_FW_ERROR, &trans_pcie->status);
800 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); 805 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
801 wake_up(&trans_pcie->wait_command_queue); 806 wake_up(&trans_pcie->wait_command_queue);
802 807
803 local_bh_disable(); 808 local_bh_disable();
804 iwl_op_mode_nic_error(trans->op_mode); 809 iwl_nic_error(trans);
805 local_bh_enable(); 810 local_bh_enable();
806} 811}
807 812
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 5d9337bec67a..cde9c16f6e4f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -279,9 +279,6 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
279 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 279 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
280 iwl_pcie_apm_init(trans); 280 iwl_pcie_apm_init(trans);
281 281
282 /* Set interrupt coalescing calibration timer to default (512 usecs) */
283 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
284
285 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 282 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
286 283
287 iwl_pcie_set_pwr(trans, false); 284 iwl_pcie_set_pwr(trans, false);
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 059c5acad3a0..0adde919a258 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -207,7 +207,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
207 IWL_ERR(trans, "scratch %d = 0x%08x\n", i, 207 IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
208 le32_to_cpu(txq->scratchbufs[i].scratch)); 208 le32_to_cpu(txq->scratchbufs[i].scratch));
209 209
210 iwl_op_mode_nic_error(trans->op_mode); 210 iwl_nic_error(trans);
211} 211}
212 212
213/* 213/*
@@ -1023,7 +1023,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
1023 if (nfreed++ > 0) { 1023 if (nfreed++ > 0) {
1024 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", 1024 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
1025 idx, q->write_ptr, q->read_ptr); 1025 idx, q->write_ptr, q->read_ptr);
1026 iwl_op_mode_nic_error(trans->op_mode); 1026 iwl_nic_error(trans);
1027 } 1027 }
1028 } 1028 }
1029 1029
@@ -1562,7 +1562,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
1562 get_cmd_string(trans_pcie, cmd->id)); 1562 get_cmd_string(trans_pcie, cmd->id));
1563 ret = -ETIMEDOUT; 1563 ret = -ETIMEDOUT;
1564 1564
1565 iwl_op_mode_nic_error(trans->op_mode); 1565 iwl_nic_error(trans);
1566 1566
1567 goto cancel; 1567 goto cancel;
1568 } 1568 }
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index 668dd27616a0..cc6a0a586f0b 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -913,7 +913,10 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf,
913 char *p2; 913 char *p2;
914 struct debug_data *d = f->private_data; 914 struct debug_data *d = f->private_data;
915 915
916 pdata = kmalloc(cnt, GFP_KERNEL); 916 if (cnt == 0)
917 return 0;
918
919 pdata = kmalloc(cnt + 1, GFP_KERNEL);
917 if (pdata == NULL) 920 if (pdata == NULL)
918 return 0; 921 return 0;
919 922
@@ -922,6 +925,7 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf,
922 kfree(pdata); 925 kfree(pdata);
923 return 0; 926 return 0;
924 } 927 }
928 pdata[cnt] = '\0';
925 929
926 p0 = pdata; 930 p0 = pdata;
927 for (i = 0; i < num_of_items; i++) { 931 for (i = 0; i < num_of_items; i++) {
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index ef8c98e21098..f499efc6abcf 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -902,6 +902,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
902 if (card->model == MODEL_UNKNOWN) { 902 if (card->model == MODEL_UNKNOWN) {
903 pr_err("unsupported manf_id 0x%04x / card_id 0x%04x\n", 903 pr_err("unsupported manf_id 0x%04x / card_id 0x%04x\n",
904 p_dev->manf_id, p_dev->card_id); 904 p_dev->manf_id, p_dev->card_id);
905 ret = -ENODEV;
905 goto out2; 906 goto out2;
906 } 907 }
907 908
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index de0df86704e7..c72438bb2faf 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -383,6 +383,14 @@ struct hwsim_radiotap_hdr {
383 __le16 rt_chbitmask; 383 __le16 rt_chbitmask;
384} __packed; 384} __packed;
385 385
386struct hwsim_radiotap_ack_hdr {
387 struct ieee80211_radiotap_header hdr;
388 u8 rt_flags;
389 u8 pad;
390 __le16 rt_channel;
391 __le16 rt_chbitmask;
392} __packed;
393
386/* MAC80211_HWSIM netlinf family */ 394/* MAC80211_HWSIM netlinf family */
387static struct genl_family hwsim_genl_family = { 395static struct genl_family hwsim_genl_family = {
388 .id = GENL_ID_GENERATE, 396 .id = GENL_ID_GENERATE,
@@ -500,7 +508,7 @@ static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan,
500 const u8 *addr) 508 const u8 *addr)
501{ 509{
502 struct sk_buff *skb; 510 struct sk_buff *skb;
503 struct hwsim_radiotap_hdr *hdr; 511 struct hwsim_radiotap_ack_hdr *hdr;
504 u16 flags; 512 u16 flags;
505 struct ieee80211_hdr *hdr11; 513 struct ieee80211_hdr *hdr11;
506 514
@@ -511,14 +519,14 @@ static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan,
511 if (skb == NULL) 519 if (skb == NULL)
512 return; 520 return;
513 521
514 hdr = (struct hwsim_radiotap_hdr *) skb_put(skb, sizeof(*hdr)); 522 hdr = (struct hwsim_radiotap_ack_hdr *) skb_put(skb, sizeof(*hdr));
515 hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION; 523 hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION;
516 hdr->hdr.it_pad = 0; 524 hdr->hdr.it_pad = 0;
517 hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr)); 525 hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr));
518 hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | 526 hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
519 (1 << IEEE80211_RADIOTAP_CHANNEL)); 527 (1 << IEEE80211_RADIOTAP_CHANNEL));
520 hdr->rt_flags = 0; 528 hdr->rt_flags = 0;
521 hdr->rt_rate = 0; 529 hdr->pad = 0;
522 hdr->rt_channel = cpu_to_le16(chan->center_freq); 530 hdr->rt_channel = cpu_to_le16(chan->center_freq);
523 flags = IEEE80211_CHAN_2GHZ; 531 flags = IEEE80211_CHAN_2GHZ;
524 hdr->rt_chbitmask = cpu_to_le16(flags); 532 hdr->rt_chbitmask = cpu_to_le16(flags);
@@ -1230,7 +1238,7 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
1230 HRTIMER_MODE_REL); 1238 HRTIMER_MODE_REL);
1231 } else if (!info->enable_beacon) { 1239 } else if (!info->enable_beacon) {
1232 unsigned int count = 0; 1240 unsigned int count = 0;
1233 ieee80211_iterate_active_interfaces( 1241 ieee80211_iterate_active_interfaces_atomic(
1234 data->hw, IEEE80211_IFACE_ITER_NORMAL, 1242 data->hw, IEEE80211_IFACE_ITER_NORMAL,
1235 mac80211_hwsim_bcn_en_iter, &count); 1243 mac80211_hwsim_bcn_en_iter, &count);
1236 wiphy_debug(hw->wiphy, " beaconing vifs remaining: %u", 1244 wiphy_debug(hw->wiphy, " beaconing vifs remaining: %u",
@@ -2097,7 +2105,7 @@ out:
2097} 2105}
2098 2106
2099/* Generic Netlink operations array */ 2107/* Generic Netlink operations array */
2100static struct genl_ops hwsim_ops[] = { 2108static const struct genl_ops hwsim_ops[] = {
2101 { 2109 {
2102 .cmd = HWSIM_CMD_REGISTER, 2110 .cmd = HWSIM_CMD_REGISTER,
2103 .policy = hwsim_genl_policy, 2111 .policy = hwsim_genl_policy,
@@ -2148,8 +2156,7 @@ static int hwsim_init_netlink(void)
2148 2156
2149 printk(KERN_INFO "mac80211_hwsim: initializing netlink\n"); 2157 printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
2150 2158
2151 rc = genl_register_family_with_ops(&hwsim_genl_family, 2159 rc = genl_register_family_with_ops(&hwsim_genl_family, hwsim_ops);
2152 hwsim_ops, ARRAY_SIZE(hwsim_ops));
2153 if (rc) 2160 if (rc)
2154 goto failure; 2161 goto failure;
2155 2162
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index fbad00a5abc8..aeaea0e3b4c4 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -2210,8 +2210,10 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
2210 priv->bss_started = 0; 2210 priv->bss_started = 0;
2211 priv->bss_num = 0; 2211 priv->bss_num = 0;
2212 2212
2213 if (mwifiex_cfg80211_init_p2p_client(priv)) 2213 if (mwifiex_cfg80211_init_p2p_client(priv)) {
2214 return ERR_PTR(-EFAULT); 2214 wdev = ERR_PTR(-EFAULT);
2215 goto done;
2216 }
2215 2217
2216 break; 2218 break;
2217 default: 2219 default:
@@ -2224,7 +2226,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
2224 if (!dev) { 2226 if (!dev) {
2225 wiphy_err(wiphy, "no memory available for netdevice\n"); 2227 wiphy_err(wiphy, "no memory available for netdevice\n");
2226 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; 2228 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
2227 return ERR_PTR(-ENOMEM); 2229 wdev = ERR_PTR(-ENOMEM);
2230 goto done;
2228 } 2231 }
2229 2232
2230 mwifiex_init_priv_params(priv, dev); 2233 mwifiex_init_priv_params(priv, dev);
@@ -2264,7 +2267,9 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
2264 wiphy_err(wiphy, "cannot register virtual network device\n"); 2267 wiphy_err(wiphy, "cannot register virtual network device\n");
2265 free_netdev(dev); 2268 free_netdev(dev);
2266 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; 2269 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
2267 return ERR_PTR(-EFAULT); 2270 priv->netdev = NULL;
2271 wdev = ERR_PTR(-EFAULT);
2272 goto done;
2268 } 2273 }
2269 2274
2270 sema_init(&priv->async_sem, 1); 2275 sema_init(&priv->async_sem, 1);
@@ -2274,6 +2279,13 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
2274#ifdef CONFIG_DEBUG_FS 2279#ifdef CONFIG_DEBUG_FS
2275 mwifiex_dev_debugfs_init(priv); 2280 mwifiex_dev_debugfs_init(priv);
2276#endif 2281#endif
2282
2283done:
2284 if (IS_ERR(wdev)) {
2285 kfree(priv->wdev);
2286 priv->wdev = NULL;
2287 }
2288
2277 return wdev; 2289 return wdev;
2278} 2290}
2279EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf); 2291EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
@@ -2298,7 +2310,10 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
2298 unregister_netdevice(wdev->netdev); 2310 unregister_netdevice(wdev->netdev);
2299 2311
2300 /* Clear the priv in adapter */ 2312 /* Clear the priv in adapter */
2313 priv->netdev->ieee80211_ptr = NULL;
2301 priv->netdev = NULL; 2314 priv->netdev = NULL;
2315 kfree(wdev);
2316 priv->wdev = NULL;
2302 2317
2303 priv->media_connected = false; 2318 priv->media_connected = false;
2304 2319
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index f80f30b6160e..c8385ec77a86 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -1020,8 +1020,8 @@ struct mwifiex_power_group {
1020} __packed; 1020} __packed;
1021 1021
1022struct mwifiex_types_power_group { 1022struct mwifiex_types_power_group {
1023 u16 type; 1023 __le16 type;
1024 u16 length; 1024 __le16 length;
1025} __packed; 1025} __packed;
1026 1026
1027struct host_cmd_ds_txpwr_cfg { 1027struct host_cmd_ds_txpwr_cfg {
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index 220af4fe0fc6..81ac001ee741 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -82,7 +82,7 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
82 struct mwifiex_ie_list *ie_list) 82 struct mwifiex_ie_list *ie_list)
83{ 83{
84 u16 travel_len, index, mask; 84 u16 travel_len, index, mask;
85 s16 input_len; 85 s16 input_len, tlv_len;
86 struct mwifiex_ie *ie; 86 struct mwifiex_ie *ie;
87 u8 *tmp; 87 u8 *tmp;
88 88
@@ -91,11 +91,13 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
91 91
92 ie_list->len = 0; 92 ie_list->len = 0;
93 93
94 while (input_len > 0) { 94 while (input_len >= sizeof(struct mwifiex_ie_types_header)) {
95 ie = (struct mwifiex_ie *)(((u8 *)ie_list) + travel_len); 95 ie = (struct mwifiex_ie *)(((u8 *)ie_list) + travel_len);
96 input_len -= le16_to_cpu(ie->ie_length) + MWIFIEX_IE_HDR_SIZE; 96 tlv_len = le16_to_cpu(ie->ie_length);
97 travel_len += le16_to_cpu(ie->ie_length) + MWIFIEX_IE_HDR_SIZE; 97 travel_len += tlv_len + MWIFIEX_IE_HDR_SIZE;
98 98
99 if (input_len < tlv_len + MWIFIEX_IE_HDR_SIZE)
100 return -1;
99 index = le16_to_cpu(ie->ie_index); 101 index = le16_to_cpu(ie->ie_index);
100 mask = le16_to_cpu(ie->mgmt_subtype_mask); 102 mask = le16_to_cpu(ie->mgmt_subtype_mask);
101 103
@@ -132,6 +134,7 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
132 le16_add_cpu(&ie_list->len, 134 le16_add_cpu(&ie_list->len,
133 le16_to_cpu(priv->mgmt_ie[index].ie_length) + 135 le16_to_cpu(priv->mgmt_ie[index].ie_length) +
134 MWIFIEX_IE_HDR_SIZE); 136 MWIFIEX_IE_HDR_SIZE);
137 input_len -= tlv_len + MWIFIEX_IE_HDR_SIZE;
135 } 138 }
136 139
137 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) 140 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 9d7c9d354d34..78e8a6666cc6 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -411,13 +411,14 @@ static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
411 */ 411 */
412static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) 412static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
413{ 413{
414 int ret, i; 414 int ret;
415 char fmt[64]; 415 char fmt[64];
416 struct mwifiex_private *priv; 416 struct mwifiex_private *priv;
417 struct mwifiex_adapter *adapter = context; 417 struct mwifiex_adapter *adapter = context;
418 struct mwifiex_fw_image fw; 418 struct mwifiex_fw_image fw;
419 struct semaphore *sem = adapter->card_sem; 419 struct semaphore *sem = adapter->card_sem;
420 bool init_failed = false; 420 bool init_failed = false;
421 struct wireless_dev *wdev;
421 422
422 if (!firmware) { 423 if (!firmware) {
423 dev_err(adapter->dev, 424 dev_err(adapter->dev,
@@ -469,14 +470,16 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
469 priv = adapter->priv[MWIFIEX_BSS_ROLE_STA]; 470 priv = adapter->priv[MWIFIEX_BSS_ROLE_STA];
470 if (mwifiex_register_cfg80211(adapter)) { 471 if (mwifiex_register_cfg80211(adapter)) {
471 dev_err(adapter->dev, "cannot register with cfg80211\n"); 472 dev_err(adapter->dev, "cannot register with cfg80211\n");
472 goto err_register_cfg80211; 473 goto err_init_fw;
473 } 474 }
474 475
475 rtnl_lock(); 476 rtnl_lock();
476 /* Create station interface by default */ 477 /* Create station interface by default */
477 if (!mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d", 478 wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d",
478 NL80211_IFTYPE_STATION, NULL, NULL)) { 479 NL80211_IFTYPE_STATION, NULL, NULL);
480 if (IS_ERR(wdev)) {
479 dev_err(adapter->dev, "cannot create default STA interface\n"); 481 dev_err(adapter->dev, "cannot create default STA interface\n");
482 rtnl_unlock();
480 goto err_add_intf; 483 goto err_add_intf;
481 } 484 }
482 rtnl_unlock(); 485 rtnl_unlock();
@@ -486,17 +489,6 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
486 goto done; 489 goto done;
487 490
488err_add_intf: 491err_add_intf:
489 for (i = 0; i < adapter->priv_num; i++) {
490 priv = adapter->priv[i];
491
492 if (!priv)
493 continue;
494
495 if (priv->wdev && priv->netdev)
496 mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev);
497 }
498 rtnl_unlock();
499err_register_cfg80211:
500 wiphy_unregister(adapter->wiphy); 492 wiphy_unregister(adapter->wiphy);
501 wiphy_free(adapter->wiphy); 493 wiphy_free(adapter->wiphy);
502err_init_fw: 494err_init_fw:
@@ -1006,12 +998,6 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
1006 wiphy_unregister(priv->wdev->wiphy); 998 wiphy_unregister(priv->wdev->wiphy);
1007 wiphy_free(priv->wdev->wiphy); 999 wiphy_free(priv->wdev->wiphy);
1008 1000
1009 for (i = 0; i < adapter->priv_num; i++) {
1010 priv = adapter->priv[i];
1011 if (priv)
1012 kfree(priv->wdev);
1013 }
1014
1015 mwifiex_terminate_workqueue(adapter); 1001 mwifiex_terminate_workqueue(adapter);
1016 1002
1017 /* Unregister device */ 1003 /* Unregister device */
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 33fa9432b241..03688aa14e8a 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -232,7 +232,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
232 } 232 }
233 233
234 mwifiex_remove_card(card->adapter, &add_remove_card_sem); 234 mwifiex_remove_card(card->adapter, &add_remove_card_sem);
235 kfree(card);
236} 235}
237 236
238static void mwifiex_pcie_shutdown(struct pci_dev *pdev) 237static void mwifiex_pcie_shutdown(struct pci_dev *pdev)
@@ -2313,6 +2312,7 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
2313 pci_release_region(pdev, 0); 2312 pci_release_region(pdev, 0);
2314 pci_set_drvdata(pdev, NULL); 2313 pci_set_drvdata(pdev, NULL);
2315 } 2314 }
2315 kfree(card);
2316} 2316}
2317 2317
2318/* 2318/*
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 1576104e3d95..b44a31523461 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -196,7 +196,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
196 } 196 }
197 197
198 mwifiex_remove_card(card->adapter, &add_remove_card_sem); 198 mwifiex_remove_card(card->adapter, &add_remove_card_sem);
199 kfree(card);
200} 199}
201 200
202/* 201/*
@@ -1029,7 +1028,10 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
1029 struct sk_buff *skb, u32 upld_typ) 1028 struct sk_buff *skb, u32 upld_typ)
1030{ 1029{
1031 u8 *cmd_buf; 1030 u8 *cmd_buf;
1031 __le16 *curr_ptr = (__le16 *)skb->data;
1032 u16 pkt_len = le16_to_cpu(*curr_ptr);
1032 1033
1034 skb_trim(skb, pkt_len);
1033 skb_pull(skb, INTF_HEADER_LEN); 1035 skb_pull(skb, INTF_HEADER_LEN);
1034 1036
1035 switch (upld_typ) { 1037 switch (upld_typ) {
@@ -1742,7 +1744,6 @@ mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
1742 sdio_claim_host(card->func); 1744 sdio_claim_host(card->func);
1743 sdio_disable_func(card->func); 1745 sdio_disable_func(card->func);
1744 sdio_release_host(card->func); 1746 sdio_release_host(card->func);
1745 sdio_set_drvdata(card->func, NULL);
1746 } 1747 }
1747} 1748}
1748 1749
@@ -1770,7 +1771,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
1770 return ret; 1771 return ret;
1771 } 1772 }
1772 1773
1773 sdio_set_drvdata(func, card);
1774 1774
1775 adapter->dev = &func->dev; 1775 adapter->dev = &func->dev;
1776 1776
@@ -1798,6 +1798,8 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
1798 int ret; 1798 int ret;
1799 u8 sdio_ireg; 1799 u8 sdio_ireg;
1800 1800
1801 sdio_set_drvdata(card->func, card);
1802
1801 /* 1803 /*
1802 * Read the HOST_INT_STATUS_REG for ACK the first interrupt got 1804 * Read the HOST_INT_STATUS_REG for ACK the first interrupt got
1803 * from the bootloader. If we don't do this we get a interrupt 1805 * from the bootloader. If we don't do this we get a interrupt
@@ -1880,6 +1882,8 @@ static void mwifiex_cleanup_sdio(struct mwifiex_adapter *adapter)
1880 kfree(card->mpa_rx.len_arr); 1882 kfree(card->mpa_rx.len_arr);
1881 kfree(card->mpa_tx.buf); 1883 kfree(card->mpa_tx.buf);
1882 kfree(card->mpa_rx.buf); 1884 kfree(card->mpa_rx.buf);
1885 sdio_set_drvdata(card->func, NULL);
1886 kfree(card);
1883} 1887}
1884 1888
1885/* 1889/*
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 7d66018a2e33..2181ee283d82 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -239,14 +239,14 @@ static int mwifiex_cmd_tx_power_cfg(struct host_cmd_ds_command *cmd,
239 memmove(cmd_txp_cfg, txp, 239 memmove(cmd_txp_cfg, txp,
240 sizeof(struct host_cmd_ds_txpwr_cfg) + 240 sizeof(struct host_cmd_ds_txpwr_cfg) +
241 sizeof(struct mwifiex_types_power_group) + 241 sizeof(struct mwifiex_types_power_group) +
242 pg_tlv->length); 242 le16_to_cpu(pg_tlv->length));
243 243
244 pg_tlv = (struct mwifiex_types_power_group *) ((u8 *) 244 pg_tlv = (struct mwifiex_types_power_group *) ((u8 *)
245 cmd_txp_cfg + 245 cmd_txp_cfg +
246 sizeof(struct host_cmd_ds_txpwr_cfg)); 246 sizeof(struct host_cmd_ds_txpwr_cfg));
247 cmd->size = cpu_to_le16(le16_to_cpu(cmd->size) + 247 cmd->size = cpu_to_le16(le16_to_cpu(cmd->size) +
248 sizeof(struct mwifiex_types_power_group) + 248 sizeof(struct mwifiex_types_power_group) +
249 pg_tlv->length); 249 le16_to_cpu(pg_tlv->length));
250 } else { 250 } else {
251 memmove(cmd_txp_cfg, txp, sizeof(*txp)); 251 memmove(cmd_txp_cfg, txp, sizeof(*txp));
252 } 252 }
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 58a6013712d2..2675ca7f8d14 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -274,17 +274,20 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
274 struct host_cmd_ds_tx_rate_cfg *rate_cfg = &resp->params.tx_rate_cfg; 274 struct host_cmd_ds_tx_rate_cfg *rate_cfg = &resp->params.tx_rate_cfg;
275 struct mwifiex_rate_scope *rate_scope; 275 struct mwifiex_rate_scope *rate_scope;
276 struct mwifiex_ie_types_header *head; 276 struct mwifiex_ie_types_header *head;
277 u16 tlv, tlv_buf_len; 277 u16 tlv, tlv_buf_len, tlv_buf_left;
278 u8 *tlv_buf; 278 u8 *tlv_buf;
279 u32 i; 279 u32 i;
280 280
281 tlv_buf = ((u8 *)rate_cfg) + 281 tlv_buf = ((u8 *)rate_cfg) + sizeof(struct host_cmd_ds_tx_rate_cfg);
282 sizeof(struct host_cmd_ds_tx_rate_cfg); 282 tlv_buf_left = le16_to_cpu(resp->size) - S_DS_GEN - sizeof(*rate_cfg);
283 tlv_buf_len = le16_to_cpu(*(__le16 *) (tlv_buf + sizeof(u16)));
284 283
285 while (tlv_buf && tlv_buf_len > 0) { 284 while (tlv_buf_left >= sizeof(*head)) {
286 tlv = (*tlv_buf); 285 head = (struct mwifiex_ie_types_header *)tlv_buf;
287 tlv = tlv | (*(tlv_buf + 1) << 8); 286 tlv = le16_to_cpu(head->type);
287 tlv_buf_len = le16_to_cpu(head->len);
288
289 if (tlv_buf_left < (sizeof(*head) + tlv_buf_len))
290 break;
288 291
289 switch (tlv) { 292 switch (tlv) {
290 case TLV_TYPE_RATE_SCOPE: 293 case TLV_TYPE_RATE_SCOPE:
@@ -304,9 +307,8 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
304 /* Add RATE_DROP tlv here */ 307 /* Add RATE_DROP tlv here */
305 } 308 }
306 309
307 head = (struct mwifiex_ie_types_header *) tlv_buf; 310 tlv_buf += (sizeof(*head) + tlv_buf_len);
308 tlv_buf += le16_to_cpu(head->len) + sizeof(*head); 311 tlv_buf_left -= (sizeof(*head) + tlv_buf_len);
309 tlv_buf_len -= le16_to_cpu(head->len);
310 } 312 }
311 313
312 priv->is_data_rate_auto = mwifiex_is_rate_auto(priv); 314 priv->is_data_rate_auto = mwifiex_is_rate_auto(priv);
@@ -340,13 +342,17 @@ static int mwifiex_get_power_level(struct mwifiex_private *priv, void *data_buf)
340 ((u8 *) data_buf + sizeof(struct host_cmd_ds_txpwr_cfg)); 342 ((u8 *) data_buf + sizeof(struct host_cmd_ds_txpwr_cfg));
341 pg = (struct mwifiex_power_group *) 343 pg = (struct mwifiex_power_group *)
342 ((u8 *) pg_tlv_hdr + sizeof(struct mwifiex_types_power_group)); 344 ((u8 *) pg_tlv_hdr + sizeof(struct mwifiex_types_power_group));
343 length = pg_tlv_hdr->length; 345 length = le16_to_cpu(pg_tlv_hdr->length);
344 if (length > 0) { 346
345 max_power = pg->power_max; 347 /* At least one structure required to update power */
346 min_power = pg->power_min; 348 if (length < sizeof(struct mwifiex_power_group))
347 length -= sizeof(struct mwifiex_power_group); 349 return 0;
348 } 350
349 while (length) { 351 max_power = pg->power_max;
352 min_power = pg->power_min;
353 length -= sizeof(struct mwifiex_power_group);
354
355 while (length >= sizeof(struct mwifiex_power_group)) {
350 pg++; 356 pg++;
351 if (max_power < pg->power_max) 357 if (max_power < pg->power_max)
352 max_power = pg->power_max; 358 max_power = pg->power_max;
@@ -356,10 +362,8 @@ static int mwifiex_get_power_level(struct mwifiex_private *priv, void *data_buf)
356 362
357 length -= sizeof(struct mwifiex_power_group); 363 length -= sizeof(struct mwifiex_power_group);
358 } 364 }
359 if (pg_tlv_hdr->length > 0) { 365 priv->min_tx_power_level = (u8) min_power;
360 priv->min_tx_power_level = (u8) min_power; 366 priv->max_tx_power_level = (u8) max_power;
361 priv->max_tx_power_level = (u8) max_power;
362 }
363 367
364 return 0; 368 return 0;
365} 369}
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index f084412eee0b..a09398fe9e2a 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -319,8 +319,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
319 if (bss_desc && bss_desc->ssid.ssid_len && 319 if (bss_desc && bss_desc->ssid.ssid_len &&
320 (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor. 320 (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor.
321 ssid, &bss_desc->ssid))) { 321 ssid, &bss_desc->ssid))) {
322 kfree(bss_desc); 322 ret = 0;
323 return 0; 323 goto done;
324 } 324 }
325 325
326 /* Exit Adhoc mode first */ 326 /* Exit Adhoc mode first */
@@ -638,8 +638,9 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
638 txp_cfg->mode = cpu_to_le32(1); 638 txp_cfg->mode = cpu_to_le32(1);
639 pg_tlv = (struct mwifiex_types_power_group *) 639 pg_tlv = (struct mwifiex_types_power_group *)
640 (buf + sizeof(struct host_cmd_ds_txpwr_cfg)); 640 (buf + sizeof(struct host_cmd_ds_txpwr_cfg));
641 pg_tlv->type = TLV_TYPE_POWER_GROUP; 641 pg_tlv->type = cpu_to_le16(TLV_TYPE_POWER_GROUP);
642 pg_tlv->length = 4 * sizeof(struct mwifiex_power_group); 642 pg_tlv->length =
643 cpu_to_le16(4 * sizeof(struct mwifiex_power_group));
643 pg = (struct mwifiex_power_group *) 644 pg = (struct mwifiex_power_group *)
644 (buf + sizeof(struct host_cmd_ds_txpwr_cfg) 645 (buf + sizeof(struct host_cmd_ds_txpwr_cfg)
645 + sizeof(struct mwifiex_types_power_group)); 646 + sizeof(struct mwifiex_types_power_group));
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index 1cfe5a738c47..92f76d655e6c 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -97,6 +97,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
97 struct mwifiex_txinfo *tx_info; 97 struct mwifiex_txinfo *tx_info;
98 int hdr_chop; 98 int hdr_chop;
99 struct timeval tv; 99 struct timeval tv;
100 struct ethhdr *p_ethhdr;
100 u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; 101 u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
101 102
102 uap_rx_pd = (struct uap_rxpd *)(skb->data); 103 uap_rx_pd = (struct uap_rxpd *)(skb->data);
@@ -112,14 +113,36 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
112 } 113 }
113 114
114 if (!memcmp(&rx_pkt_hdr->rfc1042_hdr, 115 if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
115 rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) 116 rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
117 /* Replace the 803 header and rfc1042 header (llc/snap) with
118 * an Ethernet II header, keep the src/dst and snap_type
119 * (ethertype).
120 *
121 * The firmware only passes up SNAP frames converting all RX
122 * data from 802.11 to 802.2/LLC/SNAP frames.
123 *
124 * To create the Ethernet II, just move the src, dst address
125 * right before the snap_type.
126 */
127 p_ethhdr = (struct ethhdr *)
128 ((u8 *)(&rx_pkt_hdr->eth803_hdr)
129 + sizeof(rx_pkt_hdr->eth803_hdr)
130 + sizeof(rx_pkt_hdr->rfc1042_hdr)
131 - sizeof(rx_pkt_hdr->eth803_hdr.h_dest)
132 - sizeof(rx_pkt_hdr->eth803_hdr.h_source)
133 - sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type));
134 memcpy(p_ethhdr->h_source, rx_pkt_hdr->eth803_hdr.h_source,
135 sizeof(p_ethhdr->h_source));
136 memcpy(p_ethhdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest,
137 sizeof(p_ethhdr->h_dest));
116 /* Chop off the rxpd + the excess memory from 138 /* Chop off the rxpd + the excess memory from
117 * 802.2/llc/snap header that was removed. 139 * 802.2/llc/snap header that was removed.
118 */ 140 */
119 hdr_chop = (u8 *)eth_hdr - (u8 *)uap_rx_pd; 141 hdr_chop = (u8 *)p_ethhdr - (u8 *)uap_rx_pd;
120 else 142 } else {
121 /* Chop off the rxpd */ 143 /* Chop off the rxpd */
122 hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd; 144 hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;
145 }
123 146
124 /* Chop off the leading header bytes so the it points 147 /* Chop off the leading header bytes so the it points
125 * to the start of either the reconstructed EthII frame 148 * to the start of either the reconstructed EthII frame
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 1c70b8d09227..edf5b7a24900 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -350,7 +350,6 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
350 350
351 card->udev = udev; 351 card->udev = udev;
352 card->intf = intf; 352 card->intf = intf;
353 usb_card = card;
354 353
355 pr_debug("info: bcdUSB=%#x Device Class=%#x SubClass=%#x Protocol=%#x\n", 354 pr_debug("info: bcdUSB=%#x Device Class=%#x SubClass=%#x Protocol=%#x\n",
356 udev->descriptor.bcdUSB, udev->descriptor.bDeviceClass, 355 udev->descriptor.bcdUSB, udev->descriptor.bDeviceClass,
@@ -525,25 +524,28 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
525static void mwifiex_usb_disconnect(struct usb_interface *intf) 524static void mwifiex_usb_disconnect(struct usb_interface *intf)
526{ 525{
527 struct usb_card_rec *card = usb_get_intfdata(intf); 526 struct usb_card_rec *card = usb_get_intfdata(intf);
528 struct mwifiex_adapter *adapter;
529 527
530 if (!card || !card->adapter) { 528 if (!card) {
531 pr_err("%s: card or card->adapter is NULL\n", __func__); 529 pr_err("%s: card is NULL\n", __func__);
532 return; 530 return;
533 } 531 }
534 532
535 adapter = card->adapter;
536 if (!adapter->priv_num)
537 return;
538
539 mwifiex_usb_free(card); 533 mwifiex_usb_free(card);
540 534
541 dev_dbg(adapter->dev, "%s: removing card\n", __func__); 535 if (card->adapter) {
542 mwifiex_remove_card(adapter, &add_remove_card_sem); 536 struct mwifiex_adapter *adapter = card->adapter;
537
538 if (!adapter->priv_num)
539 return;
540
541 dev_dbg(adapter->dev, "%s: removing card\n", __func__);
542 mwifiex_remove_card(adapter, &add_remove_card_sem);
543 }
543 544
544 usb_set_intfdata(intf, NULL); 545 usb_set_intfdata(intf, NULL);
545 usb_put_dev(interface_to_usbdev(intf)); 546 usb_put_dev(interface_to_usbdev(intf));
546 kfree(card); 547 kfree(card);
548 usb_card = NULL;
547 549
548 return; 550 return;
549} 551}
@@ -754,6 +756,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
754 card->adapter = adapter; 756 card->adapter = adapter;
755 adapter->dev = &card->udev->dev; 757 adapter->dev = &card->udev->dev;
756 strcpy(adapter->fw_name, USB8797_DEFAULT_FW_NAME); 758 strcpy(adapter->fw_name, USB8797_DEFAULT_FW_NAME);
759 usb_card = card;
757 760
758 return 0; 761 return 0;
759} 762}
@@ -762,7 +765,7 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
762{ 765{
763 struct usb_card_rec *card = (struct usb_card_rec *)adapter->card; 766 struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
764 767
765 usb_set_intfdata(card->intf, NULL); 768 card->adapter = NULL;
766} 769}
767 770
768static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, 771static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
@@ -1004,7 +1007,7 @@ static void mwifiex_usb_cleanup_module(void)
1004 if (!down_interruptible(&add_remove_card_sem)) 1007 if (!down_interruptible(&add_remove_card_sem))
1005 up(&add_remove_card_sem); 1008 up(&add_remove_card_sem);
1006 1009
1007 if (usb_card) { 1010 if (usb_card && usb_card->adapter) {
1008 struct mwifiex_adapter *adapter = usb_card->adapter; 1011 struct mwifiex_adapter *adapter = usb_card->adapter;
1009 int i; 1012 int i;
1010 1013
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 5dd0ccc70b86..13eaeed03898 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -722,6 +722,9 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
722 tlv_hdr = (struct mwifiex_ie_types_data *) curr; 722 tlv_hdr = (struct mwifiex_ie_types_data *) curr;
723 tlv_len = le16_to_cpu(tlv_hdr->header.len); 723 tlv_len = le16_to_cpu(tlv_hdr->header.len);
724 724
725 if (resp_len < tlv_len + sizeof(tlv_hdr->header))
726 break;
727
725 switch (le16_to_cpu(tlv_hdr->header.type)) { 728 switch (le16_to_cpu(tlv_hdr->header.type)) {
726 case TLV_TYPE_WMMQSTATUS: 729 case TLV_TYPE_WMMQSTATUS:
727 tlv_wmm_qstatus = 730 tlv_wmm_qstatus =
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 41a16d30c79c..e05d9b4c8317 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -811,6 +811,10 @@ static const struct net_device_ops islpci_netdev_ops = {
811 .ndo_validate_addr = eth_validate_addr, 811 .ndo_validate_addr = eth_validate_addr,
812}; 812};
813 813
814static struct device_type wlan_type = {
815 .name = "wlan",
816};
817
814struct net_device * 818struct net_device *
815islpci_setup(struct pci_dev *pdev) 819islpci_setup(struct pci_dev *pdev)
816{ 820{
@@ -821,9 +825,8 @@ islpci_setup(struct pci_dev *pdev)
821 return ndev; 825 return ndev;
822 826
823 pci_set_drvdata(pdev, ndev); 827 pci_set_drvdata(pdev, ndev);
824#if defined(SET_NETDEV_DEV)
825 SET_NETDEV_DEV(ndev, &pdev->dev); 828 SET_NETDEV_DEV(ndev, &pdev->dev);
826#endif 829 SET_NETDEV_DEVTYPE(ndev, &wlan_type);
827 830
828 /* setup the structure members */ 831 /* setup the structure members */
829 ndev->base_addr = pci_resource_start(pdev, 0); 832 ndev->base_addr = pci_resource_start(pdev, 0);
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index c5738f14c4ba..776aff3678ff 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -2640,7 +2640,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
2640 2640
2641 if (rt2x00_rt(rt2x00dev, RT5392)) { 2641 if (rt2x00_rt(rt2x00dev, RT5392)) {
2642 rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr); 2642 rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
2643 if (info->default_power1 > POWER_BOUND) 2643 if (info->default_power2 > POWER_BOUND)
2644 rt2x00_set_field8(&rfcsr, RFCSR50_TX, POWER_BOUND); 2644 rt2x00_set_field8(&rfcsr, RFCSR50_TX, POWER_BOUND);
2645 else 2645 else
2646 rt2x00_set_field8(&rfcsr, RFCSR50_TX, 2646 rt2x00_set_field8(&rfcsr, RFCSR50_TX,
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 080b1fcae5fa..9dd92a700442 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -181,6 +181,7 @@ static void rt2x00lib_autowakeup(struct work_struct *work)
181static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac, 181static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac,
182 struct ieee80211_vif *vif) 182 struct ieee80211_vif *vif)
183{ 183{
184 struct ieee80211_tx_control control = {};
184 struct rt2x00_dev *rt2x00dev = data; 185 struct rt2x00_dev *rt2x00dev = data;
185 struct sk_buff *skb; 186 struct sk_buff *skb;
186 187
@@ -195,7 +196,7 @@ static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac,
195 */ 196 */
196 skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif); 197 skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
197 while (skb) { 198 while (skb) {
198 rt2x00mac_tx(rt2x00dev->hw, NULL, skb); 199 rt2x00mac_tx(rt2x00dev->hw, &control, skb);
199 skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif); 200 skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
200 } 201 }
201} 202}
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index a0935987fa3a..7f40ab8e1bd8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -146,7 +146,7 @@ void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length);
146 * @local: frame is not from mac80211 146 * @local: frame is not from mac80211
147 */ 147 */
148int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, 148int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
149 bool local); 149 struct ieee80211_sta *sta, bool local);
150 150
151/** 151/**
152 * rt2x00queue_update_beacon - Send new beacon from mac80211 152 * rt2x00queue_update_beacon - Send new beacon from mac80211
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 7c157857f5ce..2183e7978399 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -90,7 +90,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
90 frag_skb->data, data_length, tx_info, 90 frag_skb->data, data_length, tx_info,
91 (struct ieee80211_rts *)(skb->data)); 91 (struct ieee80211_rts *)(skb->data));
92 92
93 retval = rt2x00queue_write_tx_frame(queue, skb, true); 93 retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true);
94 if (retval) { 94 if (retval) {
95 dev_kfree_skb_any(skb); 95 dev_kfree_skb_any(skb);
96 rt2x00_warn(rt2x00dev, "Failed to send RTS/CTS frame\n"); 96 rt2x00_warn(rt2x00dev, "Failed to send RTS/CTS frame\n");
@@ -151,7 +151,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
151 goto exit_fail; 151 goto exit_fail;
152 } 152 }
153 153
154 if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false))) 154 if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false)))
155 goto exit_fail; 155 goto exit_fail;
156 156
157 /* 157 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 50590b1420a5..a5d38e8ad9e4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -635,7 +635,7 @@ static void rt2x00queue_bar_check(struct queue_entry *entry)
635} 635}
636 636
637int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, 637int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
638 bool local) 638 struct ieee80211_sta *sta, bool local)
639{ 639{
640 struct ieee80211_tx_info *tx_info; 640 struct ieee80211_tx_info *tx_info;
641 struct queue_entry *entry; 641 struct queue_entry *entry;
@@ -649,7 +649,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
649 * after that we are free to use the skb->cb array 649 * after that we are free to use the skb->cb array
650 * for our information. 650 * for our information.
651 */ 651 */
652 rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL); 652 rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);
653 653
654 /* 654 /*
655 * All information is retrieved from the skb->cb array, 655 * All information is retrieved from the skb->cb array,
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 9a78e3daf742..ff784072fb42 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -37,6 +37,7 @@
37 37
38#include <linux/ip.h> 38#include <linux/ip.h>
39#include <linux/module.h> 39#include <linux/module.h>
40#include <linux/udp.h>
40 41
41/* 42/*
42 *NOTICE!!!: This file will be very big, we should 43 *NOTICE!!!: This file will be very big, we should
@@ -1074,64 +1075,52 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
1074 if (!ieee80211_is_data(fc)) 1075 if (!ieee80211_is_data(fc))
1075 return false; 1076 return false;
1076 1077
1078 ip = (const struct iphdr *)(skb->data + mac_hdr_len +
1079 SNAP_SIZE + PROTOC_TYPE_SIZE);
1080 ether_type = be16_to_cpup((__be16 *)
1081 (skb->data + mac_hdr_len + SNAP_SIZE));
1077 1082
1078 ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len + 1083 switch (ether_type) {
1079 SNAP_SIZE + PROTOC_TYPE_SIZE); 1084 case ETH_P_IP: {
1080 ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE); 1085 struct udphdr *udp;
1081 /* ether_type = ntohs(ether_type); */ 1086 u16 src;
1082 1087 u16 dst;
1083 if (ETH_P_IP == ether_type) {
1084 if (IPPROTO_UDP == ip->protocol) {
1085 struct udphdr *udp = (struct udphdr *)((u8 *) ip +
1086 (ip->ihl << 2));
1087 if (((((u8 *) udp)[1] == 68) &&
1088 (((u8 *) udp)[3] == 67)) ||
1089 ((((u8 *) udp)[1] == 67) &&
1090 (((u8 *) udp)[3] == 68))) {
1091 /*
1092 * 68 : UDP BOOTP client
1093 * 67 : UDP BOOTP server
1094 */
1095 RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV),
1096 DBG_DMESG, "dhcp %s !!\n",
1097 is_tx ? "Tx" : "Rx");
1098
1099 if (is_tx) {
1100 rtlpriv->enter_ps = false;
1101 schedule_work(&rtlpriv->
1102 works.lps_change_work);
1103 ppsc->last_delaylps_stamp_jiffies =
1104 jiffies;
1105 }
1106 1088
1107 return true; 1089 if (ip->protocol != IPPROTO_UDP)
1108 } 1090 return false;
1109 } 1091 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
1110 } else if (ETH_P_ARP == ether_type) { 1092 src = be16_to_cpu(udp->source);
1111 if (is_tx) { 1093 dst = be16_to_cpu(udp->dest);
1112 rtlpriv->enter_ps = false;
1113 schedule_work(&rtlpriv->works.lps_change_work);
1114 ppsc->last_delaylps_stamp_jiffies = jiffies;
1115 }
1116 1094
1117 return true; 1095 /* If this case involves port 68 (UDP BOOTP client) connecting
1118 } else if (ETH_P_PAE == ether_type) { 1096 * with port 67 (UDP BOOTP server), then return true so that
1097 * the lowest speed is used.
1098 */
1099 if (!((src == 68 && dst == 67) || (src == 67 && dst == 68)))
1100 return false;
1101
1102 RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
1103 "dhcp %s !!\n", is_tx ? "Tx" : "Rx");
1104 break;
1105 }
1106 case ETH_P_ARP:
1107 break;
1108 case ETH_P_PAE:
1119 RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, 1109 RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
1120 "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx"); 1110 "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");
1121 1111 break;
1122 if (is_tx) { 1112 case ETH_P_IPV6:
1123 rtlpriv->enter_ps = false; 1113 /* TODO: Is this right? */
1124 schedule_work(&rtlpriv->works.lps_change_work); 1114 return false;
1125 ppsc->last_delaylps_stamp_jiffies = jiffies; 1115 default:
1126 } 1116 return false;
1127
1128 return true;
1129 } else if (ETH_P_IPV6 == ether_type) {
1130 /* IPv6 */
1131 return true;
1132 } 1117 }
1133 1118 if (is_tx) {
1134 return false; 1119 rtlpriv->enter_ps = false;
1120 schedule_work(&rtlpriv->works.lps_change_work);
1121 ppsc->last_delaylps_stamp_jiffies = jiffies;
1122 }
1123 return true;
1135} 1124}
1136EXPORT_SYMBOL_GPL(rtl_is_special_data); 1125EXPORT_SYMBOL_GPL(rtl_is_special_data);
1137 1126
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index ae13fb94b2e8..2ffc7298f686 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -262,9 +262,9 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
262 sizeof(u8), GFP_ATOMIC); 262 sizeof(u8), GFP_ATOMIC);
263 if (!efuse_tbl) 263 if (!efuse_tbl)
264 return; 264 return;
265 efuse_word = kmalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC); 265 efuse_word = kzalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC);
266 if (!efuse_word) 266 if (!efuse_word)
267 goto done; 267 goto out;
268 for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) { 268 for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
269 efuse_word[i] = kmalloc(efuse_max_section * sizeof(u16), 269 efuse_word[i] = kmalloc(efuse_max_section * sizeof(u16),
270 GFP_ATOMIC); 270 GFP_ATOMIC);
@@ -378,6 +378,7 @@ done:
378 for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) 378 for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++)
379 kfree(efuse_word[i]); 379 kfree(efuse_word[i]);
380 kfree(efuse_word); 380 kfree(efuse_word);
381out:
381 kfree(efuse_tbl); 382 kfree(efuse_tbl);
382} 383}
383 384
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 393685390f3e..e26312fb4356 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -769,7 +769,7 @@ static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
769 769
770static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw, 770static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
771 struct rtl_stats *pstats, 771 struct rtl_stats *pstats,
772 struct rx_desc_92c *pdesc, 772 struct rx_desc_92c *p_desc,
773 struct rx_fwinfo_92c *p_drvinfo, 773 struct rx_fwinfo_92c *p_drvinfo,
774 bool packet_match_bssid, 774 bool packet_match_bssid,
775 bool packet_toself, 775 bool packet_toself,
@@ -784,11 +784,11 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
784 u32 rssi, total_rssi = 0; 784 u32 rssi, total_rssi = 0;
785 bool in_powersavemode = false; 785 bool in_powersavemode = false;
786 bool is_cck_rate; 786 bool is_cck_rate;
787 u8 *pdesc = (u8 *)p_desc;
787 788
788 is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc); 789 is_cck_rate = RX_HAL_IS_CCK_RATE(p_desc);
789 pstats->packet_matchbssid = packet_match_bssid; 790 pstats->packet_matchbssid = packet_match_bssid;
790 pstats->packet_toself = packet_toself; 791 pstats->packet_toself = packet_toself;
791 pstats->is_cck = is_cck_rate;
792 pstats->packet_beacon = packet_beacon; 792 pstats->packet_beacon = packet_beacon;
793 pstats->is_cck = is_cck_rate; 793 pstats->is_cck = is_cck_rate;
794 pstats->RX_SIGQ[0] = -1; 794 pstats->RX_SIGQ[0] = -1;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 25e50ffc44ec..1bc21ccfa71b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -303,10 +303,10 @@ out:
303bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw, 303bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
304 struct rtl_stats *stats, 304 struct rtl_stats *stats,
305 struct ieee80211_rx_status *rx_status, 305 struct ieee80211_rx_status *rx_status,
306 u8 *p_desc, struct sk_buff *skb) 306 u8 *pdesc, struct sk_buff *skb)
307{ 307{
308 struct rx_fwinfo_92c *p_drvinfo; 308 struct rx_fwinfo_92c *p_drvinfo;
309 struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc; 309 struct rx_desc_92c *p_desc = (struct rx_desc_92c *)pdesc;
310 u32 phystatus = GET_RX_DESC_PHY_STATUS(pdesc); 310 u32 phystatus = GET_RX_DESC_PHY_STATUS(pdesc);
311 311
312 stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc); 312 stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
@@ -345,11 +345,11 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
345 if (phystatus) { 345 if (phystatus) {
346 p_drvinfo = (struct rx_fwinfo_92c *)(skb->data + 346 p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
347 stats->rx_bufshift); 347 stats->rx_bufshift);
348 rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc, 348 rtl92c_translate_rx_signal_stuff(hw, skb, stats, p_desc,
349 p_drvinfo); 349 p_drvinfo);
350 } 350 }
351 /*rx_status->qual = stats->signal; */ 351 /*rx_status->qual = stats->signal; */
352 rx_status->signal = stats->rssi + 10; 352 rx_status->signal = stats->recvsignalpower + 10;
353 return true; 353 return true;
354} 354}
355 355
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index 945ddecf90c9..0eb0f4ae5920 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -525,7 +525,7 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
525 p_drvinfo); 525 p_drvinfo);
526 } 526 }
527 /*rx_status->qual = stats->signal; */ 527 /*rx_status->qual = stats->signal; */
528 rx_status->signal = stats->rssi + 10; 528 rx_status->signal = stats->recvsignalpower + 10;
529 return true; 529 return true;
530} 530}
531 531
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
index 5061f1db3f02..92d38ab3c60e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
@@ -265,7 +265,7 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
265 rtlefuse->pwrgroup_ht40 265 rtlefuse->pwrgroup_ht40
266 [RF90_PATH_A][chnl - 1]) { 266 [RF90_PATH_A][chnl - 1]) {
267 pwrdiff_limit[i] = 267 pwrdiff_limit[i] =
268 rtlefuse->pwrgroup_ht20 268 rtlefuse->pwrgroup_ht40
269 [RF90_PATH_A][chnl - 1]; 269 [RF90_PATH_A][chnl - 1];
270 } 270 }
271 } else { 271 } else {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 222d2e792ca6..27efbcdac6a9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -329,7 +329,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
329 } 329 }
330 330
331 /*rx_status->qual = stats->signal; */ 331 /*rx_status->qual = stats->signal; */
332 rx_status->signal = stats->rssi + 10; 332 rx_status->signal = stats->recvsignalpower + 10;
333 333
334 return true; 334 return true;
335} 335}
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index d224dc3bb092..0c65386fa30d 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -77,11 +77,7 @@
77#define RTL_SLOT_TIME_9 9 77#define RTL_SLOT_TIME_9 9
78#define RTL_SLOT_TIME_20 20 78#define RTL_SLOT_TIME_20 20
79 79
80/*related with tcp/ip. */ 80/*related to tcp/ip. */
81/*if_ehther.h*/
82#define ETH_P_PAE 0x888E /*Port Access Entity (IEEE 802.1X) */
83#define ETH_P_IP 0x0800 /*Internet Protocol packet */
84#define ETH_P_ARP 0x0806 /*Address Resolution packet */
85#define SNAP_SIZE 6 81#define SNAP_SIZE 6
86#define PROTOC_TYPE_SIZE 2 82#define PROTOC_TYPE_SIZE 2
87 83
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index b78ee10a956a..870f1fa58370 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -368,11 +368,11 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
368 unsigned long rx_ring_ref, unsigned int tx_evtchn, 368 unsigned long rx_ring_ref, unsigned int tx_evtchn,
369 unsigned int rx_evtchn) 369 unsigned int rx_evtchn)
370{ 370{
371 struct task_struct *task;
371 int err = -ENOMEM; 372 int err = -ENOMEM;
372 373
373 /* Already connected through? */ 374 BUG_ON(vif->tx_irq);
374 if (vif->tx_irq) 375 BUG_ON(vif->task);
375 return 0;
376 376
377 err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); 377 err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
378 if (err < 0) 378 if (err < 0)
@@ -411,14 +411,16 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
411 } 411 }
412 412
413 init_waitqueue_head(&vif->wq); 413 init_waitqueue_head(&vif->wq);
414 vif->task = kthread_create(xenvif_kthread, 414 task = kthread_create(xenvif_kthread,
415 (void *)vif, "%s", vif->dev->name); 415 (void *)vif, "%s", vif->dev->name);
416 if (IS_ERR(vif->task)) { 416 if (IS_ERR(task)) {
417 pr_warn("Could not allocate kthread for %s\n", vif->dev->name); 417 pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
418 err = PTR_ERR(vif->task); 418 err = PTR_ERR(task);
419 goto err_rx_unbind; 419 goto err_rx_unbind;
420 } 420 }
421 421
422 vif->task = task;
423
422 rtnl_lock(); 424 rtnl_lock();
423 if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) 425 if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
424 dev_set_mtu(vif->dev, ETH_DATA_LEN); 426 dev_set_mtu(vif->dev, ETH_DATA_LEN);
@@ -461,6 +463,11 @@ void xenvif_disconnect(struct xenvif *vif)
461 if (netif_carrier_ok(vif->dev)) 463 if (netif_carrier_ok(vif->dev))
462 xenvif_carrier_off(vif); 464 xenvif_carrier_off(vif);
463 465
466 if (vif->task) {
467 kthread_stop(vif->task);
468 vif->task = NULL;
469 }
470
464 if (vif->tx_irq) { 471 if (vif->tx_irq) {
465 if (vif->tx_irq == vif->rx_irq) 472 if (vif->tx_irq == vif->rx_irq)
466 unbind_from_irqhandler(vif->tx_irq, vif); 473 unbind_from_irqhandler(vif->tx_irq, vif);
@@ -471,9 +478,6 @@ void xenvif_disconnect(struct xenvif *vif)
471 vif->tx_irq = 0; 478 vif->tx_irq = 0;
472 } 479 }
473 480
474 if (vif->task)
475 kthread_stop(vif->task);
476
477 xenvif_unmap_frontend_rings(vif); 481 xenvif_unmap_frontend_rings(vif);
478} 482}
479 483
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 919b6509455c..e884ee1fe7ed 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -39,6 +39,7 @@
39#include <linux/udp.h> 39#include <linux/udp.h>
40 40
41#include <net/tcp.h> 41#include <net/tcp.h>
42#include <net/ip6_checksum.h>
42 43
43#include <xen/xen.h> 44#include <xen/xen.h>
44#include <xen/events.h> 45#include <xen/events.h>
@@ -451,7 +452,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
451 } 452 }
452 453
453 /* Set up a GSO prefix descriptor, if necessary */ 454 /* Set up a GSO prefix descriptor, if necessary */
454 if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) { 455 if ((1 << gso_type) & vif->gso_prefix_mask) {
455 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); 456 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
456 meta = npo->meta + npo->meta_prod++; 457 meta = npo->meta + npo->meta_prod++;
457 meta->gso_type = gso_type; 458 meta->gso_type = gso_type;
@@ -1148,75 +1149,92 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
1148 return 0; 1149 return 0;
1149} 1150}
1150 1151
1151static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len) 1152static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len,
1153 unsigned int max)
1152{ 1154{
1153 if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) { 1155 if (skb_headlen(skb) >= len)
1154 /* If we need to pullup then pullup to the max, so we 1156 return 0;
1155 * won't need to do it again. 1157
1156 */ 1158 /* If we need to pullup then pullup to the max, so we
1157 int target = min_t(int, skb->len, MAX_TCP_HEADER); 1159 * won't need to do it again.
1158 __pskb_pull_tail(skb, target - skb_headlen(skb)); 1160 */
1159 } 1161 if (max > skb->len)
1162 max = skb->len;
1163
1164 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
1165 return -ENOMEM;
1166
1167 if (skb_headlen(skb) < len)
1168 return -EPROTO;
1169
1170 return 0;
1160} 1171}
1161 1172
1173/* This value should be large enough to cover a tagged ethernet header plus
1174 * maximally sized IP and TCP or UDP headers.
1175 */
1176#define MAX_IP_HDR_LEN 128
1177
1162static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb, 1178static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
1163 int recalculate_partial_csum) 1179 int recalculate_partial_csum)
1164{ 1180{
1165 struct iphdr *iph = (void *)skb->data;
1166 unsigned int header_size;
1167 unsigned int off; 1181 unsigned int off;
1168 int err = -EPROTO; 1182 bool fragment;
1183 int err;
1169 1184
1170 off = sizeof(struct iphdr); 1185 fragment = false;
1171 1186
1172 header_size = skb->network_header + off + MAX_IPOPTLEN; 1187 err = maybe_pull_tail(skb,
1173 maybe_pull_tail(skb, header_size); 1188 sizeof(struct iphdr),
1189 MAX_IP_HDR_LEN);
1190 if (err < 0)
1191 goto out;
1174 1192
1175 off = iph->ihl * 4; 1193 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
1194 fragment = true;
1176 1195
1177 switch (iph->protocol) { 1196 off = ip_hdrlen(skb);
1197
1198 err = -EPROTO;
1199
1200 switch (ip_hdr(skb)->protocol) {
1178 case IPPROTO_TCP: 1201 case IPPROTO_TCP:
1202 err = maybe_pull_tail(skb,
1203 off + sizeof(struct tcphdr),
1204 MAX_IP_HDR_LEN);
1205 if (err < 0)
1206 goto out;
1207
1179 if (!skb_partial_csum_set(skb, off, 1208 if (!skb_partial_csum_set(skb, off,
1180 offsetof(struct tcphdr, check))) 1209 offsetof(struct tcphdr, check)))
1181 goto out; 1210 goto out;
1182 1211
1183 if (recalculate_partial_csum) { 1212 if (recalculate_partial_csum)
1184 struct tcphdr *tcph = tcp_hdr(skb); 1213 tcp_hdr(skb)->check =
1185 1214 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1186 header_size = skb->network_header + 1215 ip_hdr(skb)->daddr,
1187 off + 1216 skb->len - off,
1188 sizeof(struct tcphdr); 1217 IPPROTO_TCP, 0);
1189 maybe_pull_tail(skb, header_size);
1190
1191 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1192 skb->len - off,
1193 IPPROTO_TCP, 0);
1194 }
1195 break; 1218 break;
1196 case IPPROTO_UDP: 1219 case IPPROTO_UDP:
1220 err = maybe_pull_tail(skb,
1221 off + sizeof(struct udphdr),
1222 MAX_IP_HDR_LEN);
1223 if (err < 0)
1224 goto out;
1225
1197 if (!skb_partial_csum_set(skb, off, 1226 if (!skb_partial_csum_set(skb, off,
1198 offsetof(struct udphdr, check))) 1227 offsetof(struct udphdr, check)))
1199 goto out; 1228 goto out;
1200 1229
1201 if (recalculate_partial_csum) { 1230 if (recalculate_partial_csum)
1202 struct udphdr *udph = udp_hdr(skb); 1231 udp_hdr(skb)->check =
1203 1232 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1204 header_size = skb->network_header + 1233 ip_hdr(skb)->daddr,
1205 off + 1234 skb->len - off,
1206 sizeof(struct udphdr); 1235 IPPROTO_UDP, 0);
1207 maybe_pull_tail(skb, header_size);
1208
1209 udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1210 skb->len - off,
1211 IPPROTO_UDP, 0);
1212 }
1213 break; 1236 break;
1214 default: 1237 default:
1215 if (net_ratelimit())
1216 netdev_err(vif->dev,
1217 "Attempting to checksum a non-TCP/UDP packet, "
1218 "dropping a protocol %d packet\n",
1219 iph->protocol);
1220 goto out; 1238 goto out;
1221 } 1239 }
1222 1240
@@ -1226,121 +1244,138 @@ out:
1226 return err; 1244 return err;
1227} 1245}
1228 1246
1247/* This value should be large enough to cover a tagged ethernet header plus
1248 * an IPv6 header, all options, and a maximal TCP or UDP header.
1249 */
1250#define MAX_IPV6_HDR_LEN 256
1251
1252#define OPT_HDR(type, skb, off) \
1253 (type *)(skb_network_header(skb) + (off))
1254
1229static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb, 1255static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
1230 int recalculate_partial_csum) 1256 int recalculate_partial_csum)
1231{ 1257{
1232 int err = -EPROTO; 1258 int err;
1233 struct ipv6hdr *ipv6h = (void *)skb->data;
1234 u8 nexthdr; 1259 u8 nexthdr;
1235 unsigned int header_size;
1236 unsigned int off; 1260 unsigned int off;
1261 unsigned int len;
1237 bool fragment; 1262 bool fragment;
1238 bool done; 1263 bool done;
1239 1264
1265 fragment = false;
1240 done = false; 1266 done = false;
1241 1267
1242 off = sizeof(struct ipv6hdr); 1268 off = sizeof(struct ipv6hdr);
1243 1269
1244 header_size = skb->network_header + off; 1270 err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
1245 maybe_pull_tail(skb, header_size); 1271 if (err < 0)
1272 goto out;
1246 1273
1247 nexthdr = ipv6h->nexthdr; 1274 nexthdr = ipv6_hdr(skb)->nexthdr;
1248 1275
1249 while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) && 1276 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
1250 !done) { 1277 while (off <= len && !done) {
1251 switch (nexthdr) { 1278 switch (nexthdr) {
1252 case IPPROTO_DSTOPTS: 1279 case IPPROTO_DSTOPTS:
1253 case IPPROTO_HOPOPTS: 1280 case IPPROTO_HOPOPTS:
1254 case IPPROTO_ROUTING: { 1281 case IPPROTO_ROUTING: {
1255 struct ipv6_opt_hdr *hp = (void *)(skb->data + off); 1282 struct ipv6_opt_hdr *hp;
1256 1283
1257 header_size = skb->network_header + 1284 err = maybe_pull_tail(skb,
1258 off + 1285 off +
1259 sizeof(struct ipv6_opt_hdr); 1286 sizeof(struct ipv6_opt_hdr),
1260 maybe_pull_tail(skb, header_size); 1287 MAX_IPV6_HDR_LEN);
1288 if (err < 0)
1289 goto out;
1261 1290
1291 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
1262 nexthdr = hp->nexthdr; 1292 nexthdr = hp->nexthdr;
1263 off += ipv6_optlen(hp); 1293 off += ipv6_optlen(hp);
1264 break; 1294 break;
1265 } 1295 }
1266 case IPPROTO_AH: { 1296 case IPPROTO_AH: {
1267 struct ip_auth_hdr *hp = (void *)(skb->data + off); 1297 struct ip_auth_hdr *hp;
1268 1298
1269 header_size = skb->network_header + 1299 err = maybe_pull_tail(skb,
1270 off + 1300 off +
1271 sizeof(struct ip_auth_hdr); 1301 sizeof(struct ip_auth_hdr),
1272 maybe_pull_tail(skb, header_size); 1302 MAX_IPV6_HDR_LEN);
1303 if (err < 0)
1304 goto out;
1273 1305
1306 hp = OPT_HDR(struct ip_auth_hdr, skb, off);
1274 nexthdr = hp->nexthdr; 1307 nexthdr = hp->nexthdr;
1275 off += (hp->hdrlen+2)<<2; 1308 off += ipv6_authlen(hp);
1309 break;
1310 }
1311 case IPPROTO_FRAGMENT: {
1312 struct frag_hdr *hp;
1313
1314 err = maybe_pull_tail(skb,
1315 off +
1316 sizeof(struct frag_hdr),
1317 MAX_IPV6_HDR_LEN);
1318 if (err < 0)
1319 goto out;
1320
1321 hp = OPT_HDR(struct frag_hdr, skb, off);
1322
1323 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
1324 fragment = true;
1325
1326 nexthdr = hp->nexthdr;
1327 off += sizeof(struct frag_hdr);
1276 break; 1328 break;
1277 } 1329 }
1278 case IPPROTO_FRAGMENT:
1279 fragment = true;
1280 /* fall through */
1281 default: 1330 default:
1282 done = true; 1331 done = true;
1283 break; 1332 break;
1284 } 1333 }
1285 } 1334 }
1286 1335
1287 if (!done) { 1336 err = -EPROTO;
1288 if (net_ratelimit())
1289 netdev_err(vif->dev, "Failed to parse packet header\n");
1290 goto out;
1291 }
1292 1337
1293 if (fragment) { 1338 if (!done || fragment)
1294 if (net_ratelimit())
1295 netdev_err(vif->dev, "Packet is a fragment!\n");
1296 goto out; 1339 goto out;
1297 }
1298 1340
1299 switch (nexthdr) { 1341 switch (nexthdr) {
1300 case IPPROTO_TCP: 1342 case IPPROTO_TCP:
1343 err = maybe_pull_tail(skb,
1344 off + sizeof(struct tcphdr),
1345 MAX_IPV6_HDR_LEN);
1346 if (err < 0)
1347 goto out;
1348
1301 if (!skb_partial_csum_set(skb, off, 1349 if (!skb_partial_csum_set(skb, off,
1302 offsetof(struct tcphdr, check))) 1350 offsetof(struct tcphdr, check)))
1303 goto out; 1351 goto out;
1304 1352
1305 if (recalculate_partial_csum) { 1353 if (recalculate_partial_csum)
1306 struct tcphdr *tcph = tcp_hdr(skb); 1354 tcp_hdr(skb)->check =
1307 1355 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1308 header_size = skb->network_header + 1356 &ipv6_hdr(skb)->daddr,
1309 off + 1357 skb->len - off,
1310 sizeof(struct tcphdr); 1358 IPPROTO_TCP, 0);
1311 maybe_pull_tail(skb, header_size);
1312
1313 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
1314 &ipv6h->daddr,
1315 skb->len - off,
1316 IPPROTO_TCP, 0);
1317 }
1318 break; 1359 break;
1319 case IPPROTO_UDP: 1360 case IPPROTO_UDP:
1361 err = maybe_pull_tail(skb,
1362 off + sizeof(struct udphdr),
1363 MAX_IPV6_HDR_LEN);
1364 if (err < 0)
1365 goto out;
1366
1320 if (!skb_partial_csum_set(skb, off, 1367 if (!skb_partial_csum_set(skb, off,
1321 offsetof(struct udphdr, check))) 1368 offsetof(struct udphdr, check)))
1322 goto out; 1369 goto out;
1323 1370
1324 if (recalculate_partial_csum) { 1371 if (recalculate_partial_csum)
1325 struct udphdr *udph = udp_hdr(skb); 1372 udp_hdr(skb)->check =
1326 1373 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1327 header_size = skb->network_header + 1374 &ipv6_hdr(skb)->daddr,
1328 off + 1375 skb->len - off,
1329 sizeof(struct udphdr); 1376 IPPROTO_UDP, 0);
1330 maybe_pull_tail(skb, header_size);
1331
1332 udph->check = ~csum_ipv6_magic(&ipv6h->saddr,
1333 &ipv6h->daddr,
1334 skb->len - off,
1335 IPPROTO_UDP, 0);
1336 }
1337 break; 1377 break;
1338 default: 1378 default:
1339 if (net_ratelimit())
1340 netdev_err(vif->dev,
1341 "Attempting to checksum a non-TCP/UDP packet, "
1342 "dropping a protocol %d packet\n",
1343 nexthdr);
1344 goto out; 1379 goto out;
1345 } 1380 }
1346 1381
@@ -1410,14 +1445,15 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1410 return false; 1445 return false;
1411} 1446}
1412 1447
1413static unsigned xenvif_tx_build_gops(struct xenvif *vif) 1448static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
1414{ 1449{
1415 struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop; 1450 struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
1416 struct sk_buff *skb; 1451 struct sk_buff *skb;
1417 int ret; 1452 int ret;
1418 1453
1419 while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX 1454 while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
1420 < MAX_PENDING_REQS)) { 1455 < MAX_PENDING_REQS) &&
1456 (skb_queue_len(&vif->tx_queue) < budget)) {
1421 struct xen_netif_tx_request txreq; 1457 struct xen_netif_tx_request txreq;
1422 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; 1458 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
1423 struct page *page; 1459 struct page *page;
@@ -1439,7 +1475,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif)
1439 continue; 1475 continue;
1440 } 1476 }
1441 1477
1442 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); 1478 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
1443 if (!work_to_do) 1479 if (!work_to_do)
1444 break; 1480 break;
1445 1481
@@ -1579,14 +1615,13 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif)
1579} 1615}
1580 1616
1581 1617
1582static int xenvif_tx_submit(struct xenvif *vif, int budget) 1618static int xenvif_tx_submit(struct xenvif *vif)
1583{ 1619{
1584 struct gnttab_copy *gop = vif->tx_copy_ops; 1620 struct gnttab_copy *gop = vif->tx_copy_ops;
1585 struct sk_buff *skb; 1621 struct sk_buff *skb;
1586 int work_done = 0; 1622 int work_done = 0;
1587 1623
1588 while (work_done < budget && 1624 while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
1589 (skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
1590 struct xen_netif_tx_request *txp; 1625 struct xen_netif_tx_request *txp;
1591 u16 pending_idx; 1626 u16 pending_idx;
1592 unsigned data_len; 1627 unsigned data_len;
@@ -1661,14 +1696,14 @@ int xenvif_tx_action(struct xenvif *vif, int budget)
1661 if (unlikely(!tx_work_todo(vif))) 1696 if (unlikely(!tx_work_todo(vif)))
1662 return 0; 1697 return 0;
1663 1698
1664 nr_gops = xenvif_tx_build_gops(vif); 1699 nr_gops = xenvif_tx_build_gops(vif, budget);
1665 1700
1666 if (nr_gops == 0) 1701 if (nr_gops == 0)
1667 return 0; 1702 return 0;
1668 1703
1669 gnttab_batch_copy(vif->tx_copy_ops, nr_gops); 1704 gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
1670 1705
1671 work_done = xenvif_tx_submit(vif, nr_gops); 1706 work_done = xenvif_tx_submit(vif);
1672 1707
1673 return work_done; 1708 return work_done;
1674} 1709}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index d85e66979711..e59acb1daa23 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -277,12 +277,13 @@ static void xennet_alloc_rx_buffers(struct net_device *dev)
277 if (!page) { 277 if (!page) {
278 kfree_skb(skb); 278 kfree_skb(skb);
279no_skb: 279no_skb:
280 /* Any skbuffs queued for refill? Force them out. */
281 if (i != 0)
282 goto refill;
283 /* Could not allocate any skbuffs. Try again later. */ 280 /* Could not allocate any skbuffs. Try again later. */
284 mod_timer(&np->rx_refill_timer, 281 mod_timer(&np->rx_refill_timer,
285 jiffies + (HZ/10)); 282 jiffies + (HZ/10));
283
284 /* Any skbuffs queued for refill? Force them out. */
285 if (i != 0)
286 goto refill;
286 break; 287 break;
287 } 288 }
288 289
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
index 1cb6e51e6bda..170e8e60cdb7 100644
--- a/drivers/ntb/ntb_hw.c
+++ b/drivers/ntb/ntb_hw.c
@@ -141,6 +141,24 @@ void ntb_unregister_event_callback(struct ntb_device *ndev)
141 ndev->event_cb = NULL; 141 ndev->event_cb = NULL;
142} 142}
143 143
144static void ntb_irq_work(unsigned long data)
145{
146 struct ntb_db_cb *db_cb = (struct ntb_db_cb *)data;
147 int rc;
148
149 rc = db_cb->callback(db_cb->data, db_cb->db_num);
150 if (rc)
151 tasklet_schedule(&db_cb->irq_work);
152 else {
153 struct ntb_device *ndev = db_cb->ndev;
154 unsigned long mask;
155
156 mask = readw(ndev->reg_ofs.ldb_mask);
157 clear_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
158 writew(mask, ndev->reg_ofs.ldb_mask);
159 }
160}
161
144/** 162/**
145 * ntb_register_db_callback() - register a callback for doorbell interrupt 163 * ntb_register_db_callback() - register a callback for doorbell interrupt
146 * @ndev: pointer to ntb_device instance 164 * @ndev: pointer to ntb_device instance
@@ -155,7 +173,7 @@ void ntb_unregister_event_callback(struct ntb_device *ndev)
155 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 173 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
156 */ 174 */
157int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx, 175int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
158 void *data, void (*func)(void *data, int db_num)) 176 void *data, int (*func)(void *data, int db_num))
159{ 177{
160 unsigned long mask; 178 unsigned long mask;
161 179
@@ -166,6 +184,10 @@ int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
166 184
167 ndev->db_cb[idx].callback = func; 185 ndev->db_cb[idx].callback = func;
168 ndev->db_cb[idx].data = data; 186 ndev->db_cb[idx].data = data;
187 ndev->db_cb[idx].ndev = ndev;
188
189 tasklet_init(&ndev->db_cb[idx].irq_work, ntb_irq_work,
190 (unsigned long) &ndev->db_cb[idx]);
169 191
170 /* unmask interrupt */ 192 /* unmask interrupt */
171 mask = readw(ndev->reg_ofs.ldb_mask); 193 mask = readw(ndev->reg_ofs.ldb_mask);
@@ -194,6 +216,8 @@ void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx)
194 set_bit(idx * ndev->bits_per_vector, &mask); 216 set_bit(idx * ndev->bits_per_vector, &mask);
195 writew(mask, ndev->reg_ofs.ldb_mask); 217 writew(mask, ndev->reg_ofs.ldb_mask);
196 218
219 tasklet_disable(&ndev->db_cb[idx].irq_work);
220
197 ndev->db_cb[idx].callback = NULL; 221 ndev->db_cb[idx].callback = NULL;
198} 222}
199 223
@@ -678,6 +702,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
678 return -EINVAL; 702 return -EINVAL;
679 703
680 ndev->limits.max_mw = SNB_ERRATA_MAX_MW; 704 ndev->limits.max_mw = SNB_ERRATA_MAX_MW;
705 ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
681 ndev->reg_ofs.spad_write = ndev->mw[1].vbase + 706 ndev->reg_ofs.spad_write = ndev->mw[1].vbase +
682 SNB_SPAD_OFFSET; 707 SNB_SPAD_OFFSET;
683 ndev->reg_ofs.rdb = ndev->mw[1].vbase + 708 ndev->reg_ofs.rdb = ndev->mw[1].vbase +
@@ -688,8 +713,21 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
688 */ 713 */
689 writeq(ndev->mw[1].bar_sz + 0x1000, ndev->reg_base + 714 writeq(ndev->mw[1].bar_sz + 0x1000, ndev->reg_base +
690 SNB_PBAR4LMT_OFFSET); 715 SNB_PBAR4LMT_OFFSET);
716 /* HW errata on the Limit registers. They can only be
717 * written when the base register is 4GB aligned and
718 * < 32bit. This should already be the case based on the
719 * driver defaults, but write the Limit registers first
720 * just in case.
721 */
691 } else { 722 } else {
692 ndev->limits.max_mw = SNB_MAX_MW; 723 ndev->limits.max_mw = SNB_MAX_MW;
724
725 /* HW Errata on bit 14 of b2bdoorbell register. Writes
726 * will not be mirrored to the remote system. Shrink
727 * the number of bits by one, since bit 14 is the last
728 * bit.
729 */
730 ndev->limits.max_db_bits = SNB_MAX_DB_BITS - 1;
693 ndev->reg_ofs.spad_write = ndev->reg_base + 731 ndev->reg_ofs.spad_write = ndev->reg_base +
694 SNB_B2B_SPAD_OFFSET; 732 SNB_B2B_SPAD_OFFSET;
695 ndev->reg_ofs.rdb = ndev->reg_base + 733 ndev->reg_ofs.rdb = ndev->reg_base +
@@ -699,6 +737,12 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
699 * something silly 737 * something silly
700 */ 738 */
701 writeq(0, ndev->reg_base + SNB_PBAR4LMT_OFFSET); 739 writeq(0, ndev->reg_base + SNB_PBAR4LMT_OFFSET);
740 /* HW errata on the Limit registers. They can only be
741 * written when the base register is 4GB aligned and
742 * < 32bit. This should already be the case based on the
743 * driver defaults, but write the Limit registers first
744 * just in case.
745 */
702 } 746 }
703 747
704 /* The Xeon errata workaround requires setting SBAR Base 748 /* The Xeon errata workaround requires setting SBAR Base
@@ -769,6 +813,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
769 * have an equal amount. 813 * have an equal amount.
770 */ 814 */
771 ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2; 815 ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2;
816 ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
772 /* Note: The SDOORBELL is the cause of the errata. You REALLY 817 /* Note: The SDOORBELL is the cause of the errata. You REALLY
773 * don't want to touch it. 818 * don't want to touch it.
774 */ 819 */
@@ -793,6 +838,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
793 * have an equal amount. 838 * have an equal amount.
794 */ 839 */
795 ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2; 840 ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2;
841 ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
796 ndev->reg_ofs.rdb = ndev->reg_base + SNB_PDOORBELL_OFFSET; 842 ndev->reg_ofs.rdb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
797 ndev->reg_ofs.ldb = ndev->reg_base + SNB_SDOORBELL_OFFSET; 843 ndev->reg_ofs.ldb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
798 ndev->reg_ofs.ldb_mask = ndev->reg_base + SNB_SDBMSK_OFFSET; 844 ndev->reg_ofs.ldb_mask = ndev->reg_base + SNB_SDBMSK_OFFSET;
@@ -819,7 +865,6 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
819 ndev->reg_ofs.lnk_stat = ndev->reg_base + SNB_SLINK_STATUS_OFFSET; 865 ndev->reg_ofs.lnk_stat = ndev->reg_base + SNB_SLINK_STATUS_OFFSET;
820 ndev->reg_ofs.spci_cmd = ndev->reg_base + SNB_PCICMD_OFFSET; 866 ndev->reg_ofs.spci_cmd = ndev->reg_base + SNB_PCICMD_OFFSET;
821 867
822 ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
823 ndev->limits.msix_cnt = SNB_MSIX_CNT; 868 ndev->limits.msix_cnt = SNB_MSIX_CNT;
824 ndev->bits_per_vector = SNB_DB_BITS_PER_VEC; 869 ndev->bits_per_vector = SNB_DB_BITS_PER_VEC;
825 870
@@ -934,12 +979,16 @@ static irqreturn_t bwd_callback_msix_irq(int irq, void *data)
934{ 979{
935 struct ntb_db_cb *db_cb = data; 980 struct ntb_db_cb *db_cb = data;
936 struct ntb_device *ndev = db_cb->ndev; 981 struct ntb_device *ndev = db_cb->ndev;
982 unsigned long mask;
937 983
938 dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq, 984 dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
939 db_cb->db_num); 985 db_cb->db_num);
940 986
941 if (db_cb->callback) 987 mask = readw(ndev->reg_ofs.ldb_mask);
942 db_cb->callback(db_cb->data, db_cb->db_num); 988 set_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
989 writew(mask, ndev->reg_ofs.ldb_mask);
990
991 tasklet_schedule(&db_cb->irq_work);
943 992
944 /* No need to check for the specific HB irq, any interrupt means 993 /* No need to check for the specific HB irq, any interrupt means
945 * we're connected. 994 * we're connected.
@@ -955,12 +1004,16 @@ static irqreturn_t xeon_callback_msix_irq(int irq, void *data)
955{ 1004{
956 struct ntb_db_cb *db_cb = data; 1005 struct ntb_db_cb *db_cb = data;
957 struct ntb_device *ndev = db_cb->ndev; 1006 struct ntb_device *ndev = db_cb->ndev;
1007 unsigned long mask;
958 1008
959 dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq, 1009 dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
960 db_cb->db_num); 1010 db_cb->db_num);
961 1011
962 if (db_cb->callback) 1012 mask = readw(ndev->reg_ofs.ldb_mask);
963 db_cb->callback(db_cb->data, db_cb->db_num); 1013 set_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
1014 writew(mask, ndev->reg_ofs.ldb_mask);
1015
1016 tasklet_schedule(&db_cb->irq_work);
964 1017
965 /* On Sandybridge, there are 16 bits in the interrupt register 1018 /* On Sandybridge, there are 16 bits in the interrupt register
966 * but only 4 vectors. So, 5 bits are assigned to the first 3 1019 * but only 4 vectors. So, 5 bits are assigned to the first 3
@@ -986,7 +1039,7 @@ static irqreturn_t xeon_event_msix_irq(int irq, void *dev)
986 dev_err(&ndev->pdev->dev, "Error determining link status\n"); 1039 dev_err(&ndev->pdev->dev, "Error determining link status\n");
987 1040
988 /* bit 15 is always the link bit */ 1041 /* bit 15 is always the link bit */
989 writew(1 << ndev->limits.max_db_bits, ndev->reg_ofs.ldb); 1042 writew(1 << SNB_LINK_DB, ndev->reg_ofs.ldb);
990 1043
991 return IRQ_HANDLED; 1044 return IRQ_HANDLED;
992} 1045}
@@ -1075,6 +1128,10 @@ static int ntb_setup_msix(struct ntb_device *ndev)
1075 "Only %d MSI-X vectors. Limiting the number of queues to that number.\n", 1128 "Only %d MSI-X vectors. Limiting the number of queues to that number.\n",
1076 rc); 1129 rc);
1077 msix_entries = rc; 1130 msix_entries = rc;
1131
1132 rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries);
1133 if (rc)
1134 goto err1;
1078 } 1135 }
1079 1136
1080 for (i = 0; i < msix_entries; i++) { 1137 for (i = 0; i < msix_entries; i++) {
@@ -1176,9 +1233,10 @@ static int ntb_setup_interrupts(struct ntb_device *ndev)
1176 */ 1233 */
1177 if (ndev->hw_type == BWD_HW) 1234 if (ndev->hw_type == BWD_HW)
1178 writeq(~0, ndev->reg_ofs.ldb_mask); 1235 writeq(~0, ndev->reg_ofs.ldb_mask);
1179 else 1236 else {
1180 writew(~(1 << ndev->limits.max_db_bits), 1237 u16 var = 1 << SNB_LINK_DB;
1181 ndev->reg_ofs.ldb_mask); 1238 writew(~var, ndev->reg_ofs.ldb_mask);
1239 }
1182 1240
1183 rc = ntb_setup_msix(ndev); 1241 rc = ntb_setup_msix(ndev);
1184 if (!rc) 1242 if (!rc)
@@ -1286,6 +1344,39 @@ static void ntb_free_debugfs(struct ntb_device *ndev)
1286 } 1344 }
1287} 1345}
1288 1346
1347static void ntb_hw_link_up(struct ntb_device *ndev)
1348{
1349 if (ndev->conn_type == NTB_CONN_TRANSPARENT)
1350 ntb_link_event(ndev, NTB_LINK_UP);
1351 else {
1352 u32 ntb_cntl;
1353
1354 /* Let's bring the NTB link up */
1355 ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
1356 ntb_cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK);
1357 ntb_cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP;
1358 ntb_cntl |= NTB_CNTL_P2S_BAR45_SNOOP | NTB_CNTL_S2P_BAR45_SNOOP;
1359 writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
1360 }
1361}
1362
1363static void ntb_hw_link_down(struct ntb_device *ndev)
1364{
1365 u32 ntb_cntl;
1366
1367 if (ndev->conn_type == NTB_CONN_TRANSPARENT) {
1368 ntb_link_event(ndev, NTB_LINK_DOWN);
1369 return;
1370 }
1371
1372 /* Bring NTB link down */
1373 ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
1374 ntb_cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP);
1375 ntb_cntl &= ~(NTB_CNTL_P2S_BAR45_SNOOP | NTB_CNTL_S2P_BAR45_SNOOP);
1376 ntb_cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK;
1377 writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
1378}
1379
1289static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1380static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1290{ 1381{
1291 struct ntb_device *ndev; 1382 struct ntb_device *ndev;
@@ -1374,9 +1465,7 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1374 if (rc) 1465 if (rc)
1375 goto err6; 1466 goto err6;
1376 1467
1377 /* Let's bring the NTB link up */ 1468 ntb_hw_link_up(ndev);
1378 writel(NTB_CNTL_BAR23_SNOOP | NTB_CNTL_BAR45_SNOOP,
1379 ndev->reg_ofs.lnk_cntl);
1380 1469
1381 return 0; 1470 return 0;
1382 1471
@@ -1406,12 +1495,8 @@ static void ntb_pci_remove(struct pci_dev *pdev)
1406{ 1495{
1407 struct ntb_device *ndev = pci_get_drvdata(pdev); 1496 struct ntb_device *ndev = pci_get_drvdata(pdev);
1408 int i; 1497 int i;
1409 u32 ntb_cntl;
1410 1498
1411 /* Bring NTB link down */ 1499 ntb_hw_link_down(ndev);
1412 ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
1413 ntb_cntl |= NTB_CNTL_LINK_DISABLE;
1414 writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
1415 1500
1416 ntb_transport_free(ndev->ntb_transport); 1501 ntb_transport_free(ndev->ntb_transport);
1417 1502
diff --git a/drivers/ntb/ntb_hw.h b/drivers/ntb/ntb_hw.h
index 0a31cedae7d4..bbdb7edca10c 100644
--- a/drivers/ntb/ntb_hw.h
+++ b/drivers/ntb/ntb_hw.h
@@ -106,10 +106,11 @@ struct ntb_mw {
106}; 106};
107 107
108struct ntb_db_cb { 108struct ntb_db_cb {
109 void (*callback) (void *data, int db_num); 109 int (*callback)(void *data, int db_num);
110 unsigned int db_num; 110 unsigned int db_num;
111 void *data; 111 void *data;
112 struct ntb_device *ndev; 112 struct ntb_device *ndev;
113 struct tasklet_struct irq_work;
113}; 114};
114 115
115struct ntb_device { 116struct ntb_device {
@@ -228,8 +229,8 @@ struct ntb_device *ntb_register_transport(struct pci_dev *pdev,
228void ntb_unregister_transport(struct ntb_device *ndev); 229void ntb_unregister_transport(struct ntb_device *ndev);
229void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr); 230void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr);
230int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx, 231int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
231 void *data, void (*db_cb_func) (void *data, 232 void *data, int (*db_cb_func)(void *data,
232 int db_num)); 233 int db_num));
233void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx); 234void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx);
234int ntb_register_event_callback(struct ntb_device *ndev, 235int ntb_register_event_callback(struct ntb_device *ndev,
235 void (*event_cb_func) (void *handle, 236 void (*event_cb_func) (void *handle,
diff --git a/drivers/ntb/ntb_regs.h b/drivers/ntb/ntb_regs.h
index aa4bdd393c58..9774506419d7 100644
--- a/drivers/ntb/ntb_regs.h
+++ b/drivers/ntb/ntb_regs.h
@@ -55,6 +55,7 @@
55#define SNB_MAX_COMPAT_SPADS 16 55#define SNB_MAX_COMPAT_SPADS 16
56/* Reserve the uppermost bit for link interrupt */ 56/* Reserve the uppermost bit for link interrupt */
57#define SNB_MAX_DB_BITS 15 57#define SNB_MAX_DB_BITS 15
58#define SNB_LINK_DB 15
58#define SNB_DB_BITS_PER_VEC 5 59#define SNB_DB_BITS_PER_VEC 5
59#define SNB_MAX_MW 2 60#define SNB_MAX_MW 2
60#define SNB_ERRATA_MAX_MW 1 61#define SNB_ERRATA_MAX_MW 1
@@ -75,9 +76,6 @@
75#define SNB_SBAR2XLAT_OFFSET 0x0030 76#define SNB_SBAR2XLAT_OFFSET 0x0030
76#define SNB_SBAR4XLAT_OFFSET 0x0038 77#define SNB_SBAR4XLAT_OFFSET 0x0038
77#define SNB_SBAR0BASE_OFFSET 0x0040 78#define SNB_SBAR0BASE_OFFSET 0x0040
78#define SNB_SBAR0BASE_OFFSET 0x0040
79#define SNB_SBAR2BASE_OFFSET 0x0048
80#define SNB_SBAR4BASE_OFFSET 0x0050
81#define SNB_SBAR2BASE_OFFSET 0x0048 79#define SNB_SBAR2BASE_OFFSET 0x0048
82#define SNB_SBAR4BASE_OFFSET 0x0050 80#define SNB_SBAR4BASE_OFFSET 0x0050
83#define SNB_NTBCNTL_OFFSET 0x0058 81#define SNB_NTBCNTL_OFFSET 0x0058
@@ -145,11 +143,13 @@
145#define BWD_LTSSMSTATEJMP_FORCEDETECT (1 << 2) 143#define BWD_LTSSMSTATEJMP_FORCEDETECT (1 << 2)
146#define BWD_IBIST_ERR_OFLOW 0x7FFF7FFF 144#define BWD_IBIST_ERR_OFLOW 0x7FFF7FFF
147 145
148#define NTB_CNTL_CFG_LOCK (1 << 0) 146#define NTB_CNTL_CFG_LOCK (1 << 0)
149#define NTB_CNTL_LINK_DISABLE (1 << 1) 147#define NTB_CNTL_LINK_DISABLE (1 << 1)
150#define NTB_CNTL_BAR23_SNOOP (1 << 2) 148#define NTB_CNTL_S2P_BAR23_SNOOP (1 << 2)
151#define NTB_CNTL_BAR45_SNOOP (1 << 6) 149#define NTB_CNTL_P2S_BAR23_SNOOP (1 << 4)
152#define BWD_CNTL_LINK_DOWN (1 << 16) 150#define NTB_CNTL_S2P_BAR45_SNOOP (1 << 6)
151#define NTB_CNTL_P2S_BAR45_SNOOP (1 << 8)
152#define BWD_CNTL_LINK_DOWN (1 << 16)
153 153
154#define NTB_PPD_OFFSET 0x00D4 154#define NTB_PPD_OFFSET 0x00D4
155#define SNB_PPD_CONN_TYPE 0x0003 155#define SNB_PPD_CONN_TYPE 0x0003
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 12a9e83c008b..3217f394d45b 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -119,7 +119,6 @@ struct ntb_transport_qp {
119 119
120 void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data, 120 void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
121 void *data, int len); 121 void *data, int len);
122 struct tasklet_struct rx_work;
123 struct list_head rx_pend_q; 122 struct list_head rx_pend_q;
124 struct list_head rx_free_q; 123 struct list_head rx_free_q;
125 spinlock_t ntb_rx_pend_q_lock; 124 spinlock_t ntb_rx_pend_q_lock;
@@ -584,11 +583,8 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
584 return 0; 583 return 0;
585} 584}
586 585
587static void ntb_qp_link_cleanup(struct work_struct *work) 586static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
588{ 587{
589 struct ntb_transport_qp *qp = container_of(work,
590 struct ntb_transport_qp,
591 link_cleanup);
592 struct ntb_transport *nt = qp->transport; 588 struct ntb_transport *nt = qp->transport;
593 struct pci_dev *pdev = ntb_query_pdev(nt->ndev); 589 struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
594 590
@@ -602,6 +598,16 @@ static void ntb_qp_link_cleanup(struct work_struct *work)
602 598
603 dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num); 599 dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
604 qp->qp_link = NTB_LINK_DOWN; 600 qp->qp_link = NTB_LINK_DOWN;
601}
602
603static void ntb_qp_link_cleanup_work(struct work_struct *work)
604{
605 struct ntb_transport_qp *qp = container_of(work,
606 struct ntb_transport_qp,
607 link_cleanup);
608 struct ntb_transport *nt = qp->transport;
609
610 ntb_qp_link_cleanup(qp);
605 611
606 if (nt->transport_link == NTB_LINK_UP) 612 if (nt->transport_link == NTB_LINK_UP)
607 schedule_delayed_work(&qp->link_work, 613 schedule_delayed_work(&qp->link_work,
@@ -613,22 +619,20 @@ static void ntb_qp_link_down(struct ntb_transport_qp *qp)
613 schedule_work(&qp->link_cleanup); 619 schedule_work(&qp->link_cleanup);
614} 620}
615 621
616static void ntb_transport_link_cleanup(struct work_struct *work) 622static void ntb_transport_link_cleanup(struct ntb_transport *nt)
617{ 623{
618 struct ntb_transport *nt = container_of(work, struct ntb_transport,
619 link_cleanup);
620 int i; 624 int i;
621 625
626 /* Pass along the info to any clients */
627 for (i = 0; i < nt->max_qps; i++)
628 if (!test_bit(i, &nt->qp_bitmap))
629 ntb_qp_link_cleanup(&nt->qps[i]);
630
622 if (nt->transport_link == NTB_LINK_DOWN) 631 if (nt->transport_link == NTB_LINK_DOWN)
623 cancel_delayed_work_sync(&nt->link_work); 632 cancel_delayed_work_sync(&nt->link_work);
624 else 633 else
625 nt->transport_link = NTB_LINK_DOWN; 634 nt->transport_link = NTB_LINK_DOWN;
626 635
627 /* Pass along the info to any clients */
628 for (i = 0; i < nt->max_qps; i++)
629 if (!test_bit(i, &nt->qp_bitmap))
630 ntb_qp_link_down(&nt->qps[i]);
631
632 /* The scratchpad registers keep the values if the remote side 636 /* The scratchpad registers keep the values if the remote side
633 * goes down, blast them now to give them a sane value the next 637 * goes down, blast them now to give them a sane value the next
634 * time they are accessed 638 * time they are accessed
@@ -637,6 +641,14 @@ static void ntb_transport_link_cleanup(struct work_struct *work)
637 ntb_write_local_spad(nt->ndev, i, 0); 641 ntb_write_local_spad(nt->ndev, i, 0);
638} 642}
639 643
644static void ntb_transport_link_cleanup_work(struct work_struct *work)
645{
646 struct ntb_transport *nt = container_of(work, struct ntb_transport,
647 link_cleanup);
648
649 ntb_transport_link_cleanup(nt);
650}
651
640static void ntb_transport_event_callback(void *data, enum ntb_hw_event event) 652static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
641{ 653{
642 struct ntb_transport *nt = data; 654 struct ntb_transport *nt = data;
@@ -880,7 +892,7 @@ static int ntb_transport_init_queue(struct ntb_transport *nt,
880 } 892 }
881 893
882 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work); 894 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
883 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup); 895 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
884 896
885 spin_lock_init(&qp->ntb_rx_pend_q_lock); 897 spin_lock_init(&qp->ntb_rx_pend_q_lock);
886 spin_lock_init(&qp->ntb_rx_free_q_lock); 898 spin_lock_init(&qp->ntb_rx_free_q_lock);
@@ -936,7 +948,7 @@ int ntb_transport_init(struct pci_dev *pdev)
936 } 948 }
937 949
938 INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work); 950 INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
939 INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup); 951 INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);
940 952
941 rc = ntb_register_event_callback(nt->ndev, 953 rc = ntb_register_event_callback(nt->ndev,
942 ntb_transport_event_callback); 954 ntb_transport_event_callback);
@@ -972,7 +984,7 @@ void ntb_transport_free(void *transport)
972 struct ntb_device *ndev = nt->ndev; 984 struct ntb_device *ndev = nt->ndev;
973 int i; 985 int i;
974 986
975 nt->transport_link = NTB_LINK_DOWN; 987 ntb_transport_link_cleanup(nt);
976 988
977 /* verify that all the qp's are freed */ 989 /* verify that all the qp's are freed */
978 for (i = 0; i < nt->max_qps; i++) { 990 for (i = 0; i < nt->max_qps; i++) {
@@ -1034,10 +1046,9 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
1034 struct dma_chan *chan = qp->dma_chan; 1046 struct dma_chan *chan = qp->dma_chan;
1035 struct dma_device *device; 1047 struct dma_device *device;
1036 size_t pay_off, buff_off; 1048 size_t pay_off, buff_off;
1037 dma_addr_t src, dest; 1049 struct dmaengine_unmap_data *unmap;
1038 dma_cookie_t cookie; 1050 dma_cookie_t cookie;
1039 void *buf = entry->buf; 1051 void *buf = entry->buf;
1040 unsigned long flags;
1041 1052
1042 entry->len = len; 1053 entry->len = len;
1043 1054
@@ -1045,35 +1056,49 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
1045 goto err; 1056 goto err;
1046 1057
1047 if (len < copy_bytes) 1058 if (len < copy_bytes)
1048 goto err1; 1059 goto err_wait;
1049 1060
1050 device = chan->device; 1061 device = chan->device;
1051 pay_off = (size_t) offset & ~PAGE_MASK; 1062 pay_off = (size_t) offset & ~PAGE_MASK;
1052 buff_off = (size_t) buf & ~PAGE_MASK; 1063 buff_off = (size_t) buf & ~PAGE_MASK;
1053 1064
1054 if (!is_dma_copy_aligned(device, pay_off, buff_off, len)) 1065 if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
1055 goto err1; 1066 goto err_wait;
1056 1067
1057 dest = dma_map_single(device->dev, buf, len, DMA_FROM_DEVICE); 1068 unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
1058 if (dma_mapping_error(device->dev, dest)) 1069 if (!unmap)
1059 goto err1; 1070 goto err_wait;
1060 1071
1061 src = dma_map_single(device->dev, offset, len, DMA_TO_DEVICE); 1072 unmap->len = len;
1062 if (dma_mapping_error(device->dev, src)) 1073 unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
1063 goto err2; 1074 pay_off, len, DMA_TO_DEVICE);
1075 if (dma_mapping_error(device->dev, unmap->addr[0]))
1076 goto err_get_unmap;
1077
1078 unmap->to_cnt = 1;
1079
1080 unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
1081 buff_off, len, DMA_FROM_DEVICE);
1082 if (dma_mapping_error(device->dev, unmap->addr[1]))
1083 goto err_get_unmap;
1084
1085 unmap->from_cnt = 1;
1064 1086
1065 flags = DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SRC_UNMAP_SINGLE | 1087 txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
1066 DMA_PREP_INTERRUPT; 1088 unmap->addr[0], len,
1067 txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags); 1089 DMA_PREP_INTERRUPT);
1068 if (!txd) 1090 if (!txd)
1069 goto err3; 1091 goto err_get_unmap;
1070 1092
1071 txd->callback = ntb_rx_copy_callback; 1093 txd->callback = ntb_rx_copy_callback;
1072 txd->callback_param = entry; 1094 txd->callback_param = entry;
1095 dma_set_unmap(txd, unmap);
1073 1096
1074 cookie = dmaengine_submit(txd); 1097 cookie = dmaengine_submit(txd);
1075 if (dma_submit_error(cookie)) 1098 if (dma_submit_error(cookie))
1076 goto err3; 1099 goto err_set_unmap;
1100
1101 dmaengine_unmap_put(unmap);
1077 1102
1078 qp->last_cookie = cookie; 1103 qp->last_cookie = cookie;
1079 1104
@@ -1081,11 +1106,11 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
1081 1106
1082 return; 1107 return;
1083 1108
1084err3: 1109err_set_unmap:
1085 dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE); 1110 dmaengine_unmap_put(unmap);
1086err2: 1111err_get_unmap:
1087 dma_unmap_single(device->dev, dest, len, DMA_FROM_DEVICE); 1112 dmaengine_unmap_put(unmap);
1088err1: 1113err_wait:
1089 /* If the callbacks come out of order, the writing of the index to the 1114 /* If the callbacks come out of order, the writing of the index to the
1090 * last completed will be out of order. This may result in the 1115 * last completed will be out of order. This may result in the
1091 * receive stalling forever. 1116 * receive stalling forever.
@@ -1175,11 +1200,14 @@ err:
1175 goto out; 1200 goto out;
1176} 1201}
1177 1202
1178static void ntb_transport_rx(unsigned long data) 1203static int ntb_transport_rxc_db(void *data, int db_num)
1179{ 1204{
1180 struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data; 1205 struct ntb_transport_qp *qp = data;
1181 int rc, i; 1206 int rc, i;
1182 1207
1208 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
1209 __func__, db_num);
1210
1183 /* Limit the number of packets processed in a single interrupt to 1211 /* Limit the number of packets processed in a single interrupt to
1184 * provide fairness to others 1212 * provide fairness to others
1185 */ 1213 */
@@ -1191,16 +1219,8 @@ static void ntb_transport_rx(unsigned long data)
1191 1219
1192 if (qp->dma_chan) 1220 if (qp->dma_chan)
1193 dma_async_issue_pending(qp->dma_chan); 1221 dma_async_issue_pending(qp->dma_chan);
1194}
1195
1196static void ntb_transport_rxc_db(void *data, int db_num)
1197{
1198 struct ntb_transport_qp *qp = data;
1199
1200 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
1201 __func__, db_num);
1202 1222
1203 tasklet_schedule(&qp->rx_work); 1223 return i;
1204} 1224}
1205 1225
1206static void ntb_tx_copy_callback(void *data) 1226static void ntb_tx_copy_callback(void *data)
@@ -1245,12 +1265,12 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
1245 struct dma_chan *chan = qp->dma_chan; 1265 struct dma_chan *chan = qp->dma_chan;
1246 struct dma_device *device; 1266 struct dma_device *device;
1247 size_t dest_off, buff_off; 1267 size_t dest_off, buff_off;
1248 dma_addr_t src, dest; 1268 struct dmaengine_unmap_data *unmap;
1269 dma_addr_t dest;
1249 dma_cookie_t cookie; 1270 dma_cookie_t cookie;
1250 void __iomem *offset; 1271 void __iomem *offset;
1251 size_t len = entry->len; 1272 size_t len = entry->len;
1252 void *buf = entry->buf; 1273 void *buf = entry->buf;
1253 unsigned long flags;
1254 1274
1255 offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index; 1275 offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
1256 hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header); 1276 hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
@@ -1273,28 +1293,41 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
1273 if (!is_dma_copy_aligned(device, buff_off, dest_off, len)) 1293 if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
1274 goto err; 1294 goto err;
1275 1295
1276 src = dma_map_single(device->dev, buf, len, DMA_TO_DEVICE); 1296 unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
1277 if (dma_mapping_error(device->dev, src)) 1297 if (!unmap)
1278 goto err; 1298 goto err;
1279 1299
1280 flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_PREP_INTERRUPT; 1300 unmap->len = len;
1281 txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags); 1301 unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
1302 buff_off, len, DMA_TO_DEVICE);
1303 if (dma_mapping_error(device->dev, unmap->addr[0]))
1304 goto err_get_unmap;
1305
1306 unmap->to_cnt = 1;
1307
1308 txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
1309 DMA_PREP_INTERRUPT);
1282 if (!txd) 1310 if (!txd)
1283 goto err1; 1311 goto err_get_unmap;
1284 1312
1285 txd->callback = ntb_tx_copy_callback; 1313 txd->callback = ntb_tx_copy_callback;
1286 txd->callback_param = entry; 1314 txd->callback_param = entry;
1315 dma_set_unmap(txd, unmap);
1287 1316
1288 cookie = dmaengine_submit(txd); 1317 cookie = dmaengine_submit(txd);
1289 if (dma_submit_error(cookie)) 1318 if (dma_submit_error(cookie))
1290 goto err1; 1319 goto err_set_unmap;
1320
1321 dmaengine_unmap_put(unmap);
1291 1322
1292 dma_async_issue_pending(chan); 1323 dma_async_issue_pending(chan);
1293 qp->tx_async++; 1324 qp->tx_async++;
1294 1325
1295 return; 1326 return;
1296err1: 1327err_set_unmap:
1297 dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE); 1328 dmaengine_unmap_put(unmap);
1329err_get_unmap:
1330 dmaengine_unmap_put(unmap);
1298err: 1331err:
1299 ntb_memcpy_tx(entry, offset); 1332 ntb_memcpy_tx(entry, offset);
1300 qp->tx_memcpy++; 1333 qp->tx_memcpy++;
@@ -1406,11 +1439,12 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
1406 qp->tx_handler = handlers->tx_handler; 1439 qp->tx_handler = handlers->tx_handler;
1407 qp->event_handler = handlers->event_handler; 1440 qp->event_handler = handlers->event_handler;
1408 1441
1442 dmaengine_get();
1409 qp->dma_chan = dma_find_channel(DMA_MEMCPY); 1443 qp->dma_chan = dma_find_channel(DMA_MEMCPY);
1410 if (!qp->dma_chan) 1444 if (!qp->dma_chan) {
1445 dmaengine_put();
1411 dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n"); 1446 dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
1412 else 1447 }
1413 dmaengine_get();
1414 1448
1415 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { 1449 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
1416 entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC); 1450 entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
@@ -1432,25 +1466,23 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
1432 &qp->tx_free_q); 1466 &qp->tx_free_q);
1433 } 1467 }
1434 1468
1435 tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);
1436
1437 rc = ntb_register_db_callback(qp->ndev, free_queue, qp, 1469 rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
1438 ntb_transport_rxc_db); 1470 ntb_transport_rxc_db);
1439 if (rc) 1471 if (rc)
1440 goto err3; 1472 goto err2;
1441 1473
1442 dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num); 1474 dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
1443 1475
1444 return qp; 1476 return qp;
1445 1477
1446err3:
1447 tasklet_disable(&qp->rx_work);
1448err2: 1478err2:
1449 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 1479 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1450 kfree(entry); 1480 kfree(entry);
1451err1: 1481err1:
1452 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) 1482 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
1453 kfree(entry); 1483 kfree(entry);
1484 if (qp->dma_chan)
1485 dmaengine_put();
1454 set_bit(free_queue, &nt->qp_bitmap); 1486 set_bit(free_queue, &nt->qp_bitmap);
1455err: 1487err:
1456 return NULL; 1488 return NULL;
@@ -1489,7 +1521,6 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1489 } 1521 }
1490 1522
1491 ntb_unregister_db_callback(qp->ndev, qp->qp_num); 1523 ntb_unregister_db_callback(qp->ndev, qp->qp_num);
1492 tasklet_disable(&qp->rx_work);
1493 1524
1494 cancel_delayed_work_sync(&qp->link_work); 1525 cancel_delayed_work_sync(&qp->link_work);
1495 1526
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 95655d7c0d0b..e52d7ffa38b9 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -410,7 +410,7 @@ EXPORT_SYMBOL_GPL(pci_disable_pasid);
410 * Otherwise is returns a bitmask with supported features. Current 410 * Otherwise is returns a bitmask with supported features. Current
411 * features reported are: 411 * features reported are:
412 * PCI_PASID_CAP_EXEC - Execute permission supported 412 * PCI_PASID_CAP_EXEC - Execute permission supported
413 * PCI_PASID_CAP_PRIV - Priviledged mode supported 413 * PCI_PASID_CAP_PRIV - Privileged mode supported
414 */ 414 */
415int pci_pasid_features(struct pci_dev *pdev) 415int pci_pasid_features(struct pci_dev *pdev)
416{ 416{
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index c269e430c760..2aa7b77c7c88 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -447,6 +447,11 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
447 *value = 0; 447 *value = 0;
448 break; 448 break;
449 449
450 case PCI_INTERRUPT_LINE:
451 /* LINE PIN MIN_GNT MAX_LAT */
452 *value = 0;
453 break;
454
450 default: 455 default:
451 *value = 0xffffffff; 456 *value = 0xffffffff;
452 return PCIBIOS_BAD_REGISTER_NUMBER; 457 return PCIBIOS_BAD_REGISTER_NUMBER;
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 7c4f38dd42ba..0afbbbc55c81 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -249,7 +249,7 @@ struct tegra_pcie {
249 void __iomem *afi; 249 void __iomem *afi;
250 int irq; 250 int irq;
251 251
252 struct list_head busses; 252 struct list_head buses;
253 struct resource *cs; 253 struct resource *cs;
254 254
255 struct resource io; 255 struct resource io;
@@ -399,14 +399,14 @@ free:
399 399
400/* 400/*
401 * Look up a virtual address mapping for the specified bus number. If no such 401 * Look up a virtual address mapping for the specified bus number. If no such
402 * mapping existis, try to create one. 402 * mapping exists, try to create one.
403 */ 403 */
404static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie, 404static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
405 unsigned int busnr) 405 unsigned int busnr)
406{ 406{
407 struct tegra_pcie_bus *bus; 407 struct tegra_pcie_bus *bus;
408 408
409 list_for_each_entry(bus, &pcie->busses, list) 409 list_for_each_entry(bus, &pcie->buses, list)
410 if (bus->nr == busnr) 410 if (bus->nr == busnr)
411 return (void __iomem *)bus->area->addr; 411 return (void __iomem *)bus->area->addr;
412 412
@@ -414,7 +414,7 @@ static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
414 if (IS_ERR(bus)) 414 if (IS_ERR(bus))
415 return NULL; 415 return NULL;
416 416
417 list_add_tail(&bus->list, &pcie->busses); 417 list_add_tail(&bus->list, &pcie->buses);
418 418
419 return (void __iomem *)bus->area->addr; 419 return (void __iomem *)bus->area->addr;
420} 420}
@@ -808,7 +808,7 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
808 value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS; 808 value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
809 afi_writel(pcie, value, AFI_FUSE); 809 afi_writel(pcie, value, AFI_FUSE);
810 810
811 /* initialze internal PHY, enable up to 16 PCIE lanes */ 811 /* initialize internal PHY, enable up to 16 PCIE lanes */
812 pads_writel(pcie, 0x0, PADS_CTL_SEL); 812 pads_writel(pcie, 0x0, PADS_CTL_SEL);
813 813
814 /* override IDDQ to 1 on all 4 lanes */ 814 /* override IDDQ to 1 on all 4 lanes */
@@ -1624,7 +1624,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
1624 if (!pcie) 1624 if (!pcie)
1625 return -ENOMEM; 1625 return -ENOMEM;
1626 1626
1627 INIT_LIST_HEAD(&pcie->busses); 1627 INIT_LIST_HEAD(&pcie->buses);
1628 INIT_LIST_HEAD(&pcie->ports); 1628 INIT_LIST_HEAD(&pcie->ports);
1629 pcie->soc_data = match->data; 1629 pcie->soc_data = match->data;
1630 pcie->dev = &pdev->dev; 1630 pcie->dev = &pdev->dev;
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 1e1fea4d959b..e33b68be0391 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -197,7 +197,7 @@ static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
197 return -ENOSPC; 197 return -ENOSPC;
198 /* 198 /*
199 * Check if this position is at correct offset.nvec is always a 199 * Check if this position is at correct offset.nvec is always a
200 * power of two. pos0 must be nvec bit alligned. 200 * power of two. pos0 must be nvec bit aligned.
201 */ 201 */
202 if (pos % msgvec) 202 if (pos % msgvec)
203 pos += msgvec - (pos % msgvec); 203 pos += msgvec - (pos % msgvec);
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 0a648af89531..df8caec59789 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -133,8 +133,8 @@ config HOTPLUG_PCI_RPA_DLPAR
133 133
134 To compile this driver as a module, choose M here: the 134 To compile this driver as a module, choose M here: the
135 module will be called rpadlpar_io. 135 module will be called rpadlpar_io.
136 136
137 When in doubt, say N. 137 When in doubt, say N.
138 138
139config HOTPLUG_PCI_SGI 139config HOTPLUG_PCI_SGI
140 tristate "SGI PCI Hotplug Support" 140 tristate "SGI PCI Hotplug Support"
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 47ec8c80e16d..3e6532b945c1 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -31,7 +31,7 @@ pci_hotplug-objs += cpci_hotplug_core.o \
31 cpci_hotplug_pci.o 31 cpci_hotplug_pci.o
32endif 32endif
33ifdef CONFIG_ACPI 33ifdef CONFIG_ACPI
34pci_hotplug-objs += acpi_pcihp.o 34pci_hotplug-objs += acpi_pcihp.o
35endif 35endif
36 36
37cpqphp-objs := cpqphp_core.o \ 37cpqphp-objs := cpqphp_core.o \
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 1ce8ee054f1a..a94d850ae228 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -367,7 +367,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
367 string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL }; 367 string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL };
368 } 368 }
369 369
370 handle = DEVICE_ACPI_HANDLE(&pdev->dev); 370 handle = ACPI_HANDLE(&pdev->dev);
371 if (!handle) { 371 if (!handle) {
372 /* 372 /*
373 * This hotplug controller was not listed in the ACPI name 373 * This hotplug controller was not listed in the ACPI name
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 26100f510b10..1592dbe4f904 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -176,7 +176,6 @@ u8 acpiphp_get_latch_status(struct acpiphp_slot *slot);
176u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot); 176u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot);
177 177
178/* variables */ 178/* variables */
179extern bool acpiphp_debug;
180extern bool acpiphp_disabled; 179extern bool acpiphp_disabled;
181 180
182#endif /* _ACPIPHP_H */ 181#endif /* _ACPIPHP_H */
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 8650d39db392..dca66bc44578 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -111,7 +111,7 @@ int acpiphp_register_attention(struct acpiphp_attention_info *info)
111 * @info: must match the pointer used to register 111 * @info: must match the pointer used to register
112 * 112 *
113 * Description: This is used to un-register a hardware specific acpi 113 * Description: This is used to un-register a hardware specific acpi
114 * driver that manipulates the attention LED. The pointer to the 114 * driver that manipulates the attention LED. The pointer to the
115 * info struct must be the same as the one used to set it. 115 * info struct must be the same as the one used to set it.
116 */ 116 */
117int acpiphp_unregister_attention(struct acpiphp_attention_info *info) 117int acpiphp_unregister_attention(struct acpiphp_attention_info *info)
@@ -169,8 +169,8 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
169 * was registered with us. This allows hardware specific 169 * was registered with us. This allows hardware specific
170 * ACPI implementations to blink the light for us. 170 * ACPI implementations to blink the light for us.
171 */ 171 */
172 static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) 172static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
173 { 173{
174 int retval = -ENODEV; 174 int retval = -ENODEV;
175 175
176 pr_debug("%s - physical_slot = %s\n", __func__, 176 pr_debug("%s - physical_slot = %s\n", __func__,
@@ -182,8 +182,8 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
182 } else 182 } else
183 attention_info = NULL; 183 attention_info = NULL;
184 return retval; 184 return retval;
185 } 185}
186 186
187 187
188/** 188/**
189 * get_power_status - get power status of a slot 189 * get_power_status - get power status of a slot
@@ -323,7 +323,7 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot,
323 if (retval) { 323 if (retval) {
324 pr_err("pci_hp_register failed with error %d\n", retval); 324 pr_err("pci_hp_register failed with error %d\n", retval);
325 goto error_hpslot; 325 goto error_hpslot;
326 } 326 }
327 327
328 pr_info("Slot [%s] registered\n", slot_name(slot)); 328 pr_info("Slot [%s] registered\n", slot_name(slot));
329 329
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 5b4e9eb0e8ff..1cf605f67673 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -325,7 +325,7 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
325 325
326 list_add_tail(&slot->node, &bridge->slots); 326 list_add_tail(&slot->node, &bridge->slots);
327 327
328 /* Register slots for ejectable funtions only. */ 328 /* Register slots for ejectable functions only. */
329 if (acpi_pci_check_ejectable(pbus, handle) || is_dock_device(handle)) { 329 if (acpi_pci_check_ejectable(pbus, handle) || is_dock_device(handle)) {
330 unsigned long long sun; 330 unsigned long long sun;
331 int retval; 331 int retval;
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 0d64c414bf78..ecfac7e72d91 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -116,7 +116,7 @@ static struct bin_attribute ibm_apci_table_attr = {
116 .read = ibm_read_apci_table, 116 .read = ibm_read_apci_table,
117 .write = NULL, 117 .write = NULL,
118}; 118};
119static struct acpiphp_attention_info ibm_attention_info = 119static struct acpiphp_attention_info ibm_attention_info =
120{ 120{
121 .set_attn = ibm_set_attention_status, 121 .set_attn = ibm_set_attention_status,
122 .get_attn = ibm_get_attention_status, 122 .get_attn = ibm_get_attention_status,
@@ -171,9 +171,9 @@ ibm_slot_done:
171 */ 171 */
172static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status) 172static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
173{ 173{
174 union acpi_object args[2]; 174 union acpi_object args[2];
175 struct acpi_object_list params = { .pointer = args, .count = 2 }; 175 struct acpi_object_list params = { .pointer = args, .count = 2 };
176 acpi_status stat; 176 acpi_status stat;
177 unsigned long long rc; 177 unsigned long long rc;
178 union apci_descriptor *ibm_slot; 178 union apci_descriptor *ibm_slot;
179 179
@@ -208,7 +208,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
208 * 208 *
209 * Description: This method is registered with the acpiphp module as a 209 * Description: This method is registered with the acpiphp module as a
210 * callback to do the device specific task of getting the LED status. 210 * callback to do the device specific task of getting the LED status.
211 * 211 *
212 * Because there is no direct method of getting the LED status directly 212 * Because there is no direct method of getting the LED status directly
213 * from an ACPI call, we read the aPCI table and parse out our 213 * from an ACPI call, we read the aPCI table and parse out our
214 * slot descriptor to read the status from that. 214 * slot descriptor to read the status from that.
@@ -259,7 +259,7 @@ static void ibm_handle_events(acpi_handle handle, u32 event, void *context)
259 pr_debug("%s: Received notification %02x\n", __func__, event); 259 pr_debug("%s: Received notification %02x\n", __func__, event);
260 260
261 if (subevent == 0x80) { 261 if (subevent == 0x80) {
262 pr_debug("%s: generationg bus event\n", __func__); 262 pr_debug("%s: generating bus event\n", __func__);
263 acpi_bus_generate_netlink_event(note->device->pnp.device_class, 263 acpi_bus_generate_netlink_event(note->device->pnp.device_class,
264 dev_name(&note->device->dev), 264 dev_name(&note->device->dev),
265 note->event, detail); 265 note->event, detail);
@@ -387,7 +387,7 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
387 u32 lvl, void *context, void **rv) 387 u32 lvl, void *context, void **rv)
388{ 388{
389 acpi_handle *phandle = (acpi_handle *)context; 389 acpi_handle *phandle = (acpi_handle *)context;
390 acpi_status status; 390 acpi_status status;
391 struct acpi_device_info *info; 391 struct acpi_device_info *info;
392 int retval = 0; 392 int retval = 0;
393 393
@@ -405,7 +405,7 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
405 info->hardware_id.string, handle); 405 info->hardware_id.string, handle);
406 *phandle = handle; 406 *phandle = handle;
407 /* returning non-zero causes the search to stop 407 /* returning non-zero causes the search to stop
408 * and returns this value to the caller of 408 * and returns this value to the caller of
409 * acpi_walk_namespace, but it also causes some warnings 409 * acpi_walk_namespace, but it also causes some warnings
410 * in the acpi debug code to print... 410 * in the acpi debug code to print...
411 */ 411 */
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 2b4c412f94c3..00c81a3cefc9 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -46,7 +46,7 @@
46 do { \ 46 do { \
47 if (cpci_debug) \ 47 if (cpci_debug) \
48 printk (KERN_DEBUG "%s: " format "\n", \ 48 printk (KERN_DEBUG "%s: " format "\n", \
49 MY_NAME , ## arg); \ 49 MY_NAME , ## arg); \
50 } while (0) 50 } while (0)
51#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) 51#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
52#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) 52#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index d8add34177f2..d3add9819f63 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -39,7 +39,7 @@ extern int cpci_debug;
39 do { \ 39 do { \
40 if (cpci_debug) \ 40 if (cpci_debug) \
41 printk (KERN_DEBUG "%s: " format "\n", \ 41 printk (KERN_DEBUG "%s: " format "\n", \
42 MY_NAME , ## arg); \ 42 MY_NAME , ## arg); \
43 } while (0) 43 } while (0)
44#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) 44#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
45#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) 45#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
index a6a71c41cdf8..7536eef620b0 100644
--- a/drivers/pci/hotplug/cpcihp_generic.c
+++ b/drivers/pci/hotplug/cpcihp_generic.c
@@ -13,14 +13,14 @@
13 * option) any later version. 13 * option) any later version.
14 * 14 *
15 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, 15 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
16 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY 16 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
17 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 17 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
18 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 * 25 *
26 * You should have received a copy of the GNU General Public License along 26 * You should have received a copy of the GNU General Public License along
@@ -53,9 +53,9 @@
53 53
54#define dbg(format, arg...) \ 54#define dbg(format, arg...) \
55 do { \ 55 do { \
56 if(debug) \ 56 if (debug) \
57 printk (KERN_DEBUG "%s: " format "\n", \ 57 printk (KERN_DEBUG "%s: " format "\n", \
58 MY_NAME , ## arg); \ 58 MY_NAME , ## arg); \
59 } while(0) 59 } while(0)
60#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) 60#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
61#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) 61#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
index 449b4bbc8301..e8c4a7ccf578 100644
--- a/drivers/pci/hotplug/cpcihp_zt5550.c
+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
@@ -13,14 +13,14 @@
13 * option) any later version. 13 * option) any later version.
14 * 14 *
15 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, 15 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
16 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY 16 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
17 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 17 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
18 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 * 25 *
26 * You should have received a copy of the GNU General Public License along 26 * You should have received a copy of the GNU General Public License along
@@ -48,9 +48,9 @@
48 48
49#define dbg(format, arg...) \ 49#define dbg(format, arg...) \
50 do { \ 50 do { \
51 if(debug) \ 51 if (debug) \
52 printk (KERN_DEBUG "%s: " format "\n", \ 52 printk (KERN_DEBUG "%s: " format "\n", \
53 MY_NAME , ## arg); \ 53 MY_NAME , ## arg); \
54 } while(0) 54 } while(0)
55#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) 55#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
56#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) 56#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
@@ -285,7 +285,7 @@ static struct pci_device_id zt5550_hc_pci_tbl[] = {
285 { 0, } 285 { 0, }
286}; 286};
287MODULE_DEVICE_TABLE(pci, zt5550_hc_pci_tbl); 287MODULE_DEVICE_TABLE(pci, zt5550_hc_pci_tbl);
288 288
289static struct pci_driver zt5550_hc_driver = { 289static struct pci_driver zt5550_hc_driver = {
290 .name = "zt5550_hc", 290 .name = "zt5550_hc",
291 .id_table = zt5550_hc_pci_tbl, 291 .id_table = zt5550_hc_pci_tbl,
diff --git a/drivers/pci/hotplug/cpcihp_zt5550.h b/drivers/pci/hotplug/cpcihp_zt5550.h
index bebc6060a558..9a57fda5348c 100644
--- a/drivers/pci/hotplug/cpcihp_zt5550.h
+++ b/drivers/pci/hotplug/cpcihp_zt5550.h
@@ -13,14 +13,14 @@
13 * option) any later version. 13 * option) any later version.
14 * 14 *
15 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, 15 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
16 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY 16 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
17 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 17 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
18 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 * 25 *
26 * You should have received a copy of the GNU General Public License along 26 * You should have received a copy of the GNU General Public License along
@@ -55,7 +55,7 @@
55#define HC_CMD_REG 0x0C 55#define HC_CMD_REG 0x0C
56#define ARB_CONFIG_GNT_REG 0x10 56#define ARB_CONFIG_GNT_REG 0x10
57#define ARB_CONFIG_CFG_REG 0x12 57#define ARB_CONFIG_CFG_REG 0x12
58#define ARB_CONFIG_REG 0x10 58#define ARB_CONFIG_REG 0x10
59#define ISOL_CONFIG_REG 0x18 59#define ISOL_CONFIG_REG 0x18
60#define FAULT_STATUS_REG 0x20 60#define FAULT_STATUS_REG 0x20
61#define FAULT_CONFIG_REG 0x24 61#define FAULT_CONFIG_REG 0x24
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index c8eaeb43fa5d..31273e155e6c 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -862,10 +862,10 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
862 goto err_disable_device; 862 goto err_disable_device;
863 } 863 }
864 864
865 /* Check for the proper subsystem ID's 865 /* Check for the proper subsystem IDs
866 * Intel uses a different SSID programming model than Compaq. 866 * Intel uses a different SSID programming model than Compaq.
867 * For Intel, each SSID bit identifies a PHP capability. 867 * For Intel, each SSID bit identifies a PHP capability.
868 * Also Intel HPC's may have RID=0. 868 * Also Intel HPCs may have RID=0.
869 */ 869 */
870 if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) { 870 if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) {
871 err(msg_HPC_not_supported); 871 err(msg_HPC_not_supported);
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index d282019cda5f..11845b796799 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1231,7 +1231,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
1231 1231
1232 /* Only if mode change...*/ 1232 /* Only if mode change...*/
1233 if (((bus->cur_bus_speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) || 1233 if (((bus->cur_bus_speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
1234 ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) 1234 ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
1235 set_SOGO(ctrl); 1235 set_SOGO(ctrl);
1236 1236
1237 wait_for_ctrl_irq(ctrl); 1237 wait_for_ctrl_irq(ctrl);
@@ -1828,7 +1828,7 @@ static void interrupt_event_handler(struct controller *ctrl)
1828 1828
1829 if (ctrl->event_queue[loop].event_type == INT_BUTTON_PRESS) { 1829 if (ctrl->event_queue[loop].event_type == INT_BUTTON_PRESS) {
1830 dbg("button pressed\n"); 1830 dbg("button pressed\n");
1831 } else if (ctrl->event_queue[loop].event_type == 1831 } else if (ctrl->event_queue[loop].event_type ==
1832 INT_BUTTON_CANCEL) { 1832 INT_BUTTON_CANCEL) {
1833 dbg("button cancel\n"); 1833 dbg("button cancel\n");
1834 del_timer(&p_slot->task_event); 1834 del_timer(&p_slot->task_event);
@@ -2411,11 +2411,11 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
2411 if (rc) 2411 if (rc)
2412 return rc; 2412 return rc;
2413 2413
2414 /* find range of busses to use */ 2414 /* find range of buses to use */
2415 dbg("find ranges of buses to use\n"); 2415 dbg("find ranges of buses to use\n");
2416 bus_node = get_max_resource(&(resources->bus_head), 1); 2416 bus_node = get_max_resource(&(resources->bus_head), 1);
2417 2417
2418 /* If we don't have any busses to allocate, we can't continue */ 2418 /* If we don't have any buses to allocate, we can't continue */
2419 if (!bus_node) 2419 if (!bus_node)
2420 return -ENOMEM; 2420 return -ENOMEM;
2421 2421
@@ -2900,7 +2900,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
2900 2900
2901 /* If this function needs an interrupt and we are behind 2901 /* If this function needs an interrupt and we are behind
2902 * a bridge and the pin is tied to something that's 2902 * a bridge and the pin is tied to something that's
2903 * alread mapped, set this one the same */ 2903 * already mapped, set this one the same */
2904 if (temp_byte && resources->irqs && 2904 if (temp_byte && resources->irqs &&
2905 (resources->irqs->valid_INT & 2905 (resources->irqs->valid_INT &
2906 (0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) { 2906 (0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) {
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index 09801c6945ce..6e4a12c91adb 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -291,7 +291,7 @@ int cpqhp_get_bus_dev (struct controller *ctrl, u8 * bus_num, u8 * dev_num, u8 s
291 * 291 *
292 * Reads configuration for all slots in a PCI bus and saves info. 292 * Reads configuration for all slots in a PCI bus and saves info.
293 * 293 *
294 * Note: For non-hot plug busses, the slot # saved is the device # 294 * Note: For non-hot plug buses, the slot # saved is the device #
295 * 295 *
296 * returns 0 if success 296 * returns 0 if success
297 */ 297 */
@@ -455,7 +455,7 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
455 * cpqhp_save_slot_config 455 * cpqhp_save_slot_config
456 * 456 *
457 * Saves configuration info for all PCI devices in a given slot 457 * Saves configuration info for all PCI devices in a given slot
458 * including subordinate busses. 458 * including subordinate buses.
459 * 459 *
460 * returns 0 if success 460 * returns 0 if success
461 */ 461 */
@@ -1556,4 +1556,3 @@ void cpqhp_destroy_board_resources (struct pci_func * func)
1556 kfree(tres); 1556 kfree(tres);
1557 } 1557 }
1558} 1558}
1559
diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h
index 8c5b25871d02..e3e46a7b3ee7 100644
--- a/drivers/pci/hotplug/ibmphp.h
+++ b/drivers/pci/hotplug/ibmphp.h
@@ -59,7 +59,7 @@ extern int ibmphp_debug;
59 59
60 60
61/************************************************************ 61/************************************************************
62* RESOURE TYPE * 62* RESOURCE TYPE *
63************************************************************/ 63************************************************************/
64 64
65#define EBDA_RSRC_TYPE_MASK 0x03 65#define EBDA_RSRC_TYPE_MASK 0x03
@@ -103,7 +103,7 @@ extern int ibmphp_debug;
103//-------------------------------------------------------------- 103//--------------------------------------------------------------
104 104
105struct rio_table_hdr { 105struct rio_table_hdr {
106 u8 ver_num; 106 u8 ver_num;
107 u8 scal_count; 107 u8 scal_count;
108 u8 riodev_count; 108 u8 riodev_count;
109 u16 offset; 109 u16 offset;
@@ -127,7 +127,7 @@ struct scal_detail {
127}; 127};
128 128
129//-------------------------------------------------------------- 129//--------------------------------------------------------------
130// RIO DETAIL 130// RIO DETAIL
131//-------------------------------------------------------------- 131//--------------------------------------------------------------
132 132
133struct rio_detail { 133struct rio_detail {
@@ -152,7 +152,7 @@ struct opt_rio {
152 u8 first_slot_num; 152 u8 first_slot_num;
153 u8 middle_num; 153 u8 middle_num;
154 struct list_head opt_rio_list; 154 struct list_head opt_rio_list;
155}; 155};
156 156
157struct opt_rio_lo { 157struct opt_rio_lo {
158 u8 rio_type; 158 u8 rio_type;
@@ -161,7 +161,7 @@ struct opt_rio_lo {
161 u8 middle_num; 161 u8 middle_num;
162 u8 pack_count; 162 u8 pack_count;
163 struct list_head opt_rio_lo_list; 163 struct list_head opt_rio_lo_list;
164}; 164};
165 165
166/**************************************************************** 166/****************************************************************
167* HPC DESCRIPTOR NODE * 167* HPC DESCRIPTOR NODE *
@@ -574,7 +574,7 @@ void ibmphp_hpc_stop_poll_thread(void);
574#define HPC_CTLR_IRQ_PENDG 0x80 574#define HPC_CTLR_IRQ_PENDG 0x80
575 575
576//---------------------------------------------------------------------------- 576//----------------------------------------------------------------------------
577// HPC_CTLR_WROKING status return codes 577// HPC_CTLR_WORKING status return codes
578//---------------------------------------------------------------------------- 578//----------------------------------------------------------------------------
579#define HPC_CTLR_WORKING_NO 0x00 579#define HPC_CTLR_WORKING_NO 0x00
580#define HPC_CTLR_WORKING_YES 0x01 580#define HPC_CTLR_WORKING_YES 0x01
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index cbd72d81d253..efdc13adbe41 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -58,7 +58,7 @@ MODULE_DESCRIPTION (DRIVER_DESC);
58struct pci_bus *ibmphp_pci_bus; 58struct pci_bus *ibmphp_pci_bus;
59static int max_slots; 59static int max_slots;
60 60
61static int irqs[16]; /* PIC mode IRQ's we're using so far (in case MPS 61static int irqs[16]; /* PIC mode IRQs we're using so far (in case MPS
62 * tables don't provide default info for empty slots */ 62 * tables don't provide default info for empty slots */
63 63
64static int init_flag; 64static int init_flag;
@@ -71,20 +71,20 @@ static inline int get_max_adapter_speed (struct hotplug_slot *hs, u8 *value)
71 return get_max_adapter_speed_1 (hs, value, 1); 71 return get_max_adapter_speed_1 (hs, value, 1);
72} 72}
73*/ 73*/
74static inline int get_cur_bus_info(struct slot **sl) 74static inline int get_cur_bus_info(struct slot **sl)
75{ 75{
76 int rc = 1; 76 int rc = 1;
77 struct slot * slot_cur = *sl; 77 struct slot * slot_cur = *sl;
78 78
79 debug("options = %x\n", slot_cur->ctrl->options); 79 debug("options = %x\n", slot_cur->ctrl->options);
80 debug("revision = %x\n", slot_cur->ctrl->revision); 80 debug("revision = %x\n", slot_cur->ctrl->revision);
81 81
82 if (READ_BUS_STATUS(slot_cur->ctrl)) 82 if (READ_BUS_STATUS(slot_cur->ctrl))
83 rc = ibmphp_hpc_readslot(slot_cur, READ_BUSSTATUS, NULL); 83 rc = ibmphp_hpc_readslot(slot_cur, READ_BUSSTATUS, NULL);
84 84
85 if (rc) 85 if (rc)
86 return rc; 86 return rc;
87 87
88 slot_cur->bus_on->current_speed = CURRENT_BUS_SPEED(slot_cur->busstatus); 88 slot_cur->bus_on->current_speed = CURRENT_BUS_SPEED(slot_cur->busstatus);
89 if (READ_BUS_MODE(slot_cur->ctrl)) 89 if (READ_BUS_MODE(slot_cur->ctrl))
90 slot_cur->bus_on->current_bus_mode = 90 slot_cur->bus_on->current_bus_mode =
@@ -96,7 +96,7 @@ static inline int get_cur_bus_info(struct slot **sl)
96 slot_cur->busstatus, 96 slot_cur->busstatus,
97 slot_cur->bus_on->current_speed, 97 slot_cur->bus_on->current_speed,
98 slot_cur->bus_on->current_bus_mode); 98 slot_cur->bus_on->current_bus_mode);
99 99
100 *sl = slot_cur; 100 *sl = slot_cur;
101 return 0; 101 return 0;
102} 102}
@@ -104,8 +104,8 @@ static inline int get_cur_bus_info(struct slot **sl)
104static inline int slot_update(struct slot **sl) 104static inline int slot_update(struct slot **sl)
105{ 105{
106 int rc; 106 int rc;
107 rc = ibmphp_hpc_readslot(*sl, READ_ALLSTAT, NULL); 107 rc = ibmphp_hpc_readslot(*sl, READ_ALLSTAT, NULL);
108 if (rc) 108 if (rc)
109 return rc; 109 return rc;
110 if (!init_flag) 110 if (!init_flag)
111 rc = get_cur_bus_info(sl); 111 rc = get_cur_bus_info(sl);
@@ -172,7 +172,7 @@ int ibmphp_init_devno(struct slot **cur_slot)
172 debug("(*cur_slot)->irq[3] = %x\n", 172 debug("(*cur_slot)->irq[3] = %x\n",
173 (*cur_slot)->irq[3]); 173 (*cur_slot)->irq[3]);
174 174
175 debug("rtable->exlusive_irqs = %x\n", 175 debug("rtable->exclusive_irqs = %x\n",
176 rtable->exclusive_irqs); 176 rtable->exclusive_irqs);
177 debug("rtable->slots[loop].irq[0].bitmap = %x\n", 177 debug("rtable->slots[loop].irq[0].bitmap = %x\n",
178 rtable->slots[loop].irq[0].bitmap); 178 rtable->slots[loop].irq[0].bitmap);
@@ -271,7 +271,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
271 else 271 else
272 rc = -ENODEV; 272 rc = -ENODEV;
273 } 273 }
274 } else 274 } else
275 rc = -ENODEV; 275 rc = -ENODEV;
276 276
277 ibmphp_unlock_operations(); 277 ibmphp_unlock_operations();
@@ -288,7 +288,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 * value)
288 288
289 debug("get_attention_status - Entry hotplug_slot[%lx] pvalue[%lx]\n", 289 debug("get_attention_status - Entry hotplug_slot[%lx] pvalue[%lx]\n",
290 (ulong) hotplug_slot, (ulong) value); 290 (ulong) hotplug_slot, (ulong) value);
291 291
292 ibmphp_lock_operations(); 292 ibmphp_lock_operations();
293 if (hotplug_slot) { 293 if (hotplug_slot) {
294 pslot = hotplug_slot->private; 294 pslot = hotplug_slot->private;
@@ -406,14 +406,14 @@ static int get_max_bus_speed(struct slot *slot)
406 406
407 ibmphp_lock_operations(); 407 ibmphp_lock_operations();
408 mode = slot->supported_bus_mode; 408 mode = slot->supported_bus_mode;
409 speed = slot->supported_speed; 409 speed = slot->supported_speed;
410 ibmphp_unlock_operations(); 410 ibmphp_unlock_operations();
411 411
412 switch (speed) { 412 switch (speed) {
413 case BUS_SPEED_33: 413 case BUS_SPEED_33:
414 break; 414 break;
415 case BUS_SPEED_66: 415 case BUS_SPEED_66:
416 if (mode == BUS_MODE_PCIX) 416 if (mode == BUS_MODE_PCIX)
417 speed += 0x01; 417 speed += 0x01;
418 break; 418 break;
419 case BUS_SPEED_100: 419 case BUS_SPEED_100:
@@ -515,13 +515,13 @@ static int __init init_ops(void)
515 515
516 debug("BEFORE GETTING SLOT STATUS, slot # %x\n", 516 debug("BEFORE GETTING SLOT STATUS, slot # %x\n",
517 slot_cur->number); 517 slot_cur->number);
518 if (slot_cur->ctrl->revision == 0xFF) 518 if (slot_cur->ctrl->revision == 0xFF)
519 if (get_ctrl_revision(slot_cur, 519 if (get_ctrl_revision(slot_cur,
520 &slot_cur->ctrl->revision)) 520 &slot_cur->ctrl->revision))
521 return -1; 521 return -1;
522 522
523 if (slot_cur->bus_on->current_speed == 0xFF) 523 if (slot_cur->bus_on->current_speed == 0xFF)
524 if (get_cur_bus_info(&slot_cur)) 524 if (get_cur_bus_info(&slot_cur))
525 return -1; 525 return -1;
526 get_max_bus_speed(slot_cur); 526 get_max_bus_speed(slot_cur);
527 527
@@ -539,8 +539,8 @@ static int __init init_ops(void)
539 debug("SLOT_PRESENT = %x\n", SLOT_PRESENT(slot_cur->status)); 539 debug("SLOT_PRESENT = %x\n", SLOT_PRESENT(slot_cur->status));
540 debug("SLOT_LATCH = %x\n", SLOT_LATCH(slot_cur->status)); 540 debug("SLOT_LATCH = %x\n", SLOT_LATCH(slot_cur->status));
541 541
542 if ((SLOT_PWRGD(slot_cur->status)) && 542 if ((SLOT_PWRGD(slot_cur->status)) &&
543 !(SLOT_PRESENT(slot_cur->status)) && 543 !(SLOT_PRESENT(slot_cur->status)) &&
544 !(SLOT_LATCH(slot_cur->status))) { 544 !(SLOT_LATCH(slot_cur->status))) {
545 debug("BEFORE POWER OFF COMMAND\n"); 545 debug("BEFORE POWER OFF COMMAND\n");
546 rc = power_off(slot_cur); 546 rc = power_off(slot_cur);
@@ -581,13 +581,13 @@ static int validate(struct slot *slot_cur, int opn)
581 581
582 switch (opn) { 582 switch (opn) {
583 case ENABLE: 583 case ENABLE:
584 if (!(SLOT_PWRGD(slot_cur->status)) && 584 if (!(SLOT_PWRGD(slot_cur->status)) &&
585 (SLOT_PRESENT(slot_cur->status)) && 585 (SLOT_PRESENT(slot_cur->status)) &&
586 !(SLOT_LATCH(slot_cur->status))) 586 !(SLOT_LATCH(slot_cur->status)))
587 return 0; 587 return 0;
588 break; 588 break;
589 case DISABLE: 589 case DISABLE:
590 if ((SLOT_PWRGD(slot_cur->status)) && 590 if ((SLOT_PWRGD(slot_cur->status)) &&
591 (SLOT_PRESENT(slot_cur->status)) && 591 (SLOT_PRESENT(slot_cur->status)) &&
592 !(SLOT_LATCH(slot_cur->status))) 592 !(SLOT_LATCH(slot_cur->status)))
593 return 0; 593 return 0;
@@ -617,7 +617,7 @@ int ibmphp_update_slot_info(struct slot *slot_cur)
617 err("out of system memory\n"); 617 err("out of system memory\n");
618 return -ENOMEM; 618 return -ENOMEM;
619 } 619 }
620 620
621 info->power_status = SLOT_PWRGD(slot_cur->status); 621 info->power_status = SLOT_PWRGD(slot_cur->status);
622 info->attention_status = SLOT_ATTN(slot_cur->status, 622 info->attention_status = SLOT_ATTN(slot_cur->status,
623 slot_cur->ext_status); 623 slot_cur->ext_status);
@@ -638,7 +638,7 @@ int ibmphp_update_slot_info(struct slot *slot_cur)
638 case BUS_SPEED_33: 638 case BUS_SPEED_33:
639 break; 639 break;
640 case BUS_SPEED_66: 640 case BUS_SPEED_66:
641 if (mode == BUS_MODE_PCIX) 641 if (mode == BUS_MODE_PCIX)
642 bus_speed += 0x01; 642 bus_speed += 0x01;
643 else if (mode == BUS_MODE_PCI) 643 else if (mode == BUS_MODE_PCI)
644 ; 644 ;
@@ -654,8 +654,8 @@ int ibmphp_update_slot_info(struct slot *slot_cur)
654 } 654 }
655 655
656 bus->cur_bus_speed = bus_speed; 656 bus->cur_bus_speed = bus_speed;
657 // To do: bus_names 657 // To do: bus_names
658 658
659 rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info); 659 rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info);
660 kfree(info); 660 kfree(info);
661 return rc; 661 return rc;
@@ -729,8 +729,8 @@ static void ibm_unconfigure_device(struct pci_func *func)
729} 729}
730 730
731/* 731/*
732 * The following function is to fix kernel bug regarding 732 * The following function is to fix kernel bug regarding
733 * getting bus entries, here we manually add those primary 733 * getting bus entries, here we manually add those primary
734 * bus entries to kernel bus structure whenever apply 734 * bus entries to kernel bus structure whenever apply
735 */ 735 */
736static u8 bus_structure_fixup(u8 busno) 736static u8 bus_structure_fixup(u8 busno)
@@ -814,7 +814,7 @@ static int ibm_configure_device(struct pci_func *func)
814} 814}
815 815
816/******************************************************* 816/*******************************************************
817 * Returns whether the bus is empty or not 817 * Returns whether the bus is empty or not
818 *******************************************************/ 818 *******************************************************/
819static int is_bus_empty(struct slot * slot_cur) 819static int is_bus_empty(struct slot * slot_cur)
820{ 820{
@@ -842,7 +842,7 @@ static int is_bus_empty(struct slot * slot_cur)
842} 842}
843 843
844/*********************************************************** 844/***********************************************************
845 * If the HPC permits and the bus currently empty, tries to set the 845 * If the HPC permits and the bus currently empty, tries to set the
846 * bus speed and mode at the maximum card and bus capability 846 * bus speed and mode at the maximum card and bus capability
847 * Parameters: slot 847 * Parameters: slot
848 * Returns: bus is set (0) or error code 848 * Returns: bus is set (0) or error code
@@ -856,7 +856,7 @@ static int set_bus(struct slot * slot_cur)
856 static struct pci_device_id ciobx[] = { 856 static struct pci_device_id ciobx[] = {
857 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, 0x0101) }, 857 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, 0x0101) },
858 { }, 858 { },
859 }; 859 };
860 860
861 debug("%s - entry slot # %d\n", __func__, slot_cur->number); 861 debug("%s - entry slot # %d\n", __func__, slot_cur->number);
862 if (SET_BUS_STATUS(slot_cur->ctrl) && is_bus_empty(slot_cur)) { 862 if (SET_BUS_STATUS(slot_cur->ctrl) && is_bus_empty(slot_cur)) {
@@ -877,7 +877,7 @@ static int set_bus(struct slot * slot_cur)
877 else if (!SLOT_BUS_MODE(slot_cur->ext_status)) 877 else if (!SLOT_BUS_MODE(slot_cur->ext_status))
878 /* if max slot/bus capability is 66 pci 878 /* if max slot/bus capability is 66 pci
879 and there's no bus mode mismatch, then 879 and there's no bus mode mismatch, then
880 the adapter supports 66 pci */ 880 the adapter supports 66 pci */
881 cmd = HPC_BUS_66CONVMODE; 881 cmd = HPC_BUS_66CONVMODE;
882 else 882 else
883 cmd = HPC_BUS_33CONVMODE; 883 cmd = HPC_BUS_33CONVMODE;
@@ -930,7 +930,7 @@ static int set_bus(struct slot * slot_cur)
930 return -EIO; 930 return -EIO;
931 } 931 }
932 } 932 }
933 /* This is for x440, once Brandon fixes the firmware, 933 /* This is for x440, once Brandon fixes the firmware,
934 will not need this delay */ 934 will not need this delay */
935 msleep(1000); 935 msleep(1000);
936 debug("%s -Exit\n", __func__); 936 debug("%s -Exit\n", __func__);
@@ -938,9 +938,9 @@ static int set_bus(struct slot * slot_cur)
938} 938}
939 939
940/* This routine checks the bus limitations that the slot is on from the BIOS. 940/* This routine checks the bus limitations that the slot is on from the BIOS.
941 * This is used in deciding whether or not to power up the slot. 941 * This is used in deciding whether or not to power up the slot.
942 * (electrical/spec limitations. For example, >1 133 MHz or >2 66 PCI cards on 942 * (electrical/spec limitations. For example, >1 133 MHz or >2 66 PCI cards on
943 * same bus) 943 * same bus)
944 * Parameters: slot 944 * Parameters: slot
945 * Returns: 0 = no limitations, -EINVAL = exceeded limitations on the bus 945 * Returns: 0 = no limitations, -EINVAL = exceeded limitations on the bus
946 */ 946 */
@@ -986,7 +986,7 @@ static int check_limitations(struct slot *slot_cur)
986static inline void print_card_capability(struct slot *slot_cur) 986static inline void print_card_capability(struct slot *slot_cur)
987{ 987{
988 info("capability of the card is "); 988 info("capability of the card is ");
989 if ((slot_cur->ext_status & CARD_INFO) == PCIX133) 989 if ((slot_cur->ext_status & CARD_INFO) == PCIX133)
990 info(" 133 MHz PCI-X\n"); 990 info(" 133 MHz PCI-X\n");
991 else if ((slot_cur->ext_status & CARD_INFO) == PCIX66) 991 else if ((slot_cur->ext_status & CARD_INFO) == PCIX66)
992 info(" 66 MHz PCI-X\n"); 992 info(" 66 MHz PCI-X\n");
@@ -1020,7 +1020,7 @@ static int enable_slot(struct hotplug_slot *hs)
1020 } 1020 }
1021 1021
1022 attn_LED_blink(slot_cur); 1022 attn_LED_blink(slot_cur);
1023 1023
1024 rc = set_bus(slot_cur); 1024 rc = set_bus(slot_cur);
1025 if (rc) { 1025 if (rc) {
1026 err("was not able to set the bus\n"); 1026 err("was not able to set the bus\n");
@@ -1082,7 +1082,7 @@ static int enable_slot(struct hotplug_slot *hs)
1082 rc = slot_update(&slot_cur); 1082 rc = slot_update(&slot_cur);
1083 if (rc) 1083 if (rc)
1084 goto error_power; 1084 goto error_power;
1085 1085
1086 rc = -EINVAL; 1086 rc = -EINVAL;
1087 if (SLOT_POWER(slot_cur->status) && !(SLOT_PWRGD(slot_cur->status))) { 1087 if (SLOT_POWER(slot_cur->status) && !(SLOT_PWRGD(slot_cur->status))) {
1088 err("power fault occurred trying to power up...\n"); 1088 err("power fault occurred trying to power up...\n");
@@ -1093,7 +1093,7 @@ static int enable_slot(struct hotplug_slot *hs)
1093 "speed and card capability\n"); 1093 "speed and card capability\n");
1094 print_card_capability(slot_cur); 1094 print_card_capability(slot_cur);
1095 goto error_power; 1095 goto error_power;
1096 } 1096 }
1097 /* Don't think this case will happen after above checks... 1097 /* Don't think this case will happen after above checks...
1098 * but just in case, for paranoia sake */ 1098 * but just in case, for paranoia sake */
1099 if (!(SLOT_POWER(slot_cur->status))) { 1099 if (!(SLOT_POWER(slot_cur->status))) {
@@ -1144,7 +1144,7 @@ static int enable_slot(struct hotplug_slot *hs)
1144 ibmphp_print_test(); 1144 ibmphp_print_test();
1145 rc = ibmphp_update_slot_info(slot_cur); 1145 rc = ibmphp_update_slot_info(slot_cur);
1146exit: 1146exit:
1147 ibmphp_unlock_operations(); 1147 ibmphp_unlock_operations();
1148 return rc; 1148 return rc;
1149 1149
1150error_nopower: 1150error_nopower:
@@ -1180,7 +1180,7 @@ static int ibmphp_disable_slot(struct hotplug_slot *hotplug_slot)
1180{ 1180{
1181 struct slot *slot = hotplug_slot->private; 1181 struct slot *slot = hotplug_slot->private;
1182 int rc; 1182 int rc;
1183 1183
1184 ibmphp_lock_operations(); 1184 ibmphp_lock_operations();
1185 rc = ibmphp_do_disable_slot(slot); 1185 rc = ibmphp_do_disable_slot(slot);
1186 ibmphp_unlock_operations(); 1186 ibmphp_unlock_operations();
@@ -1192,12 +1192,12 @@ int ibmphp_do_disable_slot(struct slot *slot_cur)
1192 int rc; 1192 int rc;
1193 u8 flag; 1193 u8 flag;
1194 1194
1195 debug("DISABLING SLOT...\n"); 1195 debug("DISABLING SLOT...\n");
1196 1196
1197 if ((slot_cur == NULL) || (slot_cur->ctrl == NULL)) { 1197 if ((slot_cur == NULL) || (slot_cur->ctrl == NULL)) {
1198 return -ENODEV; 1198 return -ENODEV;
1199 } 1199 }
1200 1200
1201 flag = slot_cur->flag; 1201 flag = slot_cur->flag;
1202 slot_cur->flag = 1; 1202 slot_cur->flag = 1;
1203 1203
@@ -1210,7 +1210,7 @@ int ibmphp_do_disable_slot(struct slot *slot_cur)
1210 attn_LED_blink(slot_cur); 1210 attn_LED_blink(slot_cur);
1211 1211
1212 if (slot_cur->func == NULL) { 1212 if (slot_cur->func == NULL) {
1213 /* We need this for fncs's that were there on bootup */ 1213 /* We need this for functions that were there on bootup */
1214 slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL); 1214 slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL);
1215 if (!slot_cur->func) { 1215 if (!slot_cur->func) {
1216 err("out of system memory\n"); 1216 err("out of system memory\n");
@@ -1222,12 +1222,13 @@ int ibmphp_do_disable_slot(struct slot *slot_cur)
1222 } 1222 }
1223 1223
1224 ibm_unconfigure_device(slot_cur->func); 1224 ibm_unconfigure_device(slot_cur->func);
1225 1225
1226 /* If we got here from latch suddenly opening on operating card or 1226 /*
1227 a power fault, there's no power to the card, so cannot 1227 * If we got here from latch suddenly opening on operating card or
1228 read from it to determine what resources it occupied. This operation 1228 * a power fault, there's no power to the card, so cannot
1229 is forbidden anyhow. The best we can do is remove it from kernel 1229 * read from it to determine what resources it occupied. This operation
1230 lists at least */ 1230 * is forbidden anyhow. The best we can do is remove it from kernel
1231 * lists at least */
1231 1232
1232 if (!flag) { 1233 if (!flag) {
1233 attn_off(slot_cur); 1234 attn_off(slot_cur);
@@ -1264,7 +1265,7 @@ error:
1264 rc = -EFAULT; 1265 rc = -EFAULT;
1265 goto exit; 1266 goto exit;
1266 } 1267 }
1267 if (flag) 1268 if (flag)
1268 ibmphp_update_slot_info(slot_cur); 1269 ibmphp_update_slot_info(slot_cur);
1269 goto exit; 1270 goto exit;
1270} 1271}
@@ -1339,7 +1340,7 @@ static int __init ibmphp_init(void)
1339 debug("AFTER Resource & EBDA INITIALIZATIONS\n"); 1340 debug("AFTER Resource & EBDA INITIALIZATIONS\n");
1340 1341
1341 max_slots = get_max_slots(); 1342 max_slots = get_max_slots();
1342 1343
1343 if ((rc = ibmphp_register_pci())) 1344 if ((rc = ibmphp_register_pci()))
1344 goto error; 1345 goto error;
1345 1346
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 9df78bc14541..bd044158b36c 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -123,7 +123,7 @@ static struct ebda_pci_rsrc *alloc_ebda_pci_rsrc (void)
123static void __init print_bus_info (void) 123static void __init print_bus_info (void)
124{ 124{
125 struct bus_info *ptr; 125 struct bus_info *ptr;
126 126
127 list_for_each_entry(ptr, &bus_info_head, bus_info_list) { 127 list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
128 debug ("%s - slot_min = %x\n", __func__, ptr->slot_min); 128 debug ("%s - slot_min = %x\n", __func__, ptr->slot_min);
129 debug ("%s - slot_max = %x\n", __func__, ptr->slot_max); 129 debug ("%s - slot_max = %x\n", __func__, ptr->slot_max);
@@ -131,7 +131,7 @@ static void __init print_bus_info (void)
131 debug ("%s - bus# = %x\n", __func__, ptr->busno); 131 debug ("%s - bus# = %x\n", __func__, ptr->busno);
132 debug ("%s - current_speed = %x\n", __func__, ptr->current_speed); 132 debug ("%s - current_speed = %x\n", __func__, ptr->current_speed);
133 debug ("%s - controller_id = %x\n", __func__, ptr->controller_id); 133 debug ("%s - controller_id = %x\n", __func__, ptr->controller_id);
134 134
135 debug ("%s - slots_at_33_conv = %x\n", __func__, ptr->slots_at_33_conv); 135 debug ("%s - slots_at_33_conv = %x\n", __func__, ptr->slots_at_33_conv);
136 debug ("%s - slots_at_66_conv = %x\n", __func__, ptr->slots_at_66_conv); 136 debug ("%s - slots_at_66_conv = %x\n", __func__, ptr->slots_at_66_conv);
137 debug ("%s - slots_at_66_pcix = %x\n", __func__, ptr->slots_at_66_pcix); 137 debug ("%s - slots_at_66_pcix = %x\n", __func__, ptr->slots_at_66_pcix);
@@ -144,7 +144,7 @@ static void __init print_bus_info (void)
144static void print_lo_info (void) 144static void print_lo_info (void)
145{ 145{
146 struct rio_detail *ptr; 146 struct rio_detail *ptr;
147 debug ("print_lo_info ----\n"); 147 debug ("print_lo_info ----\n");
148 list_for_each_entry(ptr, &rio_lo_head, rio_detail_list) { 148 list_for_each_entry(ptr, &rio_lo_head, rio_detail_list) {
149 debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id); 149 debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id);
150 debug ("%s - rio_type = %x\n", __func__, ptr->rio_type); 150 debug ("%s - rio_type = %x\n", __func__, ptr->rio_type);
@@ -176,7 +176,7 @@ static void __init print_ebda_pci_rsrc (void)
176 struct ebda_pci_rsrc *ptr; 176 struct ebda_pci_rsrc *ptr;
177 177
178 list_for_each_entry(ptr, &ibmphp_ebda_pci_rsrc_head, ebda_pci_rsrc_list) { 178 list_for_each_entry(ptr, &ibmphp_ebda_pci_rsrc_head, ebda_pci_rsrc_list) {
179 debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n", 179 debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
180 __func__, ptr->rsrc_type ,ptr->bus_num, ptr->dev_fun,ptr->start_addr, ptr->end_addr); 180 __func__, ptr->rsrc_type ,ptr->bus_num, ptr->dev_fun,ptr->start_addr, ptr->end_addr);
181 } 181 }
182} 182}
@@ -259,7 +259,7 @@ int __init ibmphp_access_ebda (void)
259 ebda_seg = readw (io_mem); 259 ebda_seg = readw (io_mem);
260 iounmap (io_mem); 260 iounmap (io_mem);
261 debug ("returned ebda segment: %x\n", ebda_seg); 261 debug ("returned ebda segment: %x\n", ebda_seg);
262 262
263 io_mem = ioremap(ebda_seg<<4, 1); 263 io_mem = ioremap(ebda_seg<<4, 1);
264 if (!io_mem) 264 if (!io_mem)
265 return -ENOMEM; 265 return -ENOMEM;
@@ -310,7 +310,7 @@ int __init ibmphp_access_ebda (void)
310 re = readw (io_mem + sub_addr); /* next sub blk */ 310 re = readw (io_mem + sub_addr); /* next sub blk */
311 311
312 sub_addr += 2; 312 sub_addr += 2;
313 rc_id = readw (io_mem + sub_addr); /* sub blk id */ 313 rc_id = readw (io_mem + sub_addr); /* sub blk id */
314 314
315 sub_addr += 2; 315 sub_addr += 2;
316 if (rc_id != 0x5243) 316 if (rc_id != 0x5243)
@@ -330,7 +330,7 @@ int __init ibmphp_access_ebda (void)
330 debug ("info about hpc descriptor---\n"); 330 debug ("info about hpc descriptor---\n");
331 debug ("hot blk format: %x\n", format); 331 debug ("hot blk format: %x\n", format);
332 debug ("num of controller: %x\n", num_ctlrs); 332 debug ("num of controller: %x\n", num_ctlrs);
333 debug ("offset of hpc data structure enteries: %x\n ", sub_addr); 333 debug ("offset of hpc data structure entries: %x\n ", sub_addr);
334 334
335 sub_addr = base + re; /* re sub blk */ 335 sub_addr = base + re; /* re sub blk */
336 /* FIXME: rc is never used/checked */ 336 /* FIXME: rc is never used/checked */
@@ -359,7 +359,7 @@ int __init ibmphp_access_ebda (void)
359 debug ("info about rsrc descriptor---\n"); 359 debug ("info about rsrc descriptor---\n");
360 debug ("format: %x\n", format); 360 debug ("format: %x\n", format);
361 debug ("num of rsrc: %x\n", num_entries); 361 debug ("num of rsrc: %x\n", num_entries);
362 debug ("offset of rsrc data structure enteries: %x\n ", sub_addr); 362 debug ("offset of rsrc data structure entries: %x\n ", sub_addr);
363 363
364 hs_complete = 1; 364 hs_complete = 1;
365 } else { 365 } else {
@@ -376,7 +376,7 @@ int __init ibmphp_access_ebda (void)
376 rio_table_ptr->scal_count = readb (io_mem + offset + 1); 376 rio_table_ptr->scal_count = readb (io_mem + offset + 1);
377 rio_table_ptr->riodev_count = readb (io_mem + offset + 2); 377 rio_table_ptr->riodev_count = readb (io_mem + offset + 2);
378 rio_table_ptr->offset = offset +3 ; 378 rio_table_ptr->offset = offset +3 ;
379 379
380 debug("info about rio table hdr ---\n"); 380 debug("info about rio table hdr ---\n");
381 debug("ver_num: %x\nscal_count: %x\nriodev_count: %x\noffset of rio table: %x\n ", 381 debug("ver_num: %x\nscal_count: %x\nriodev_count: %x\noffset of rio table: %x\n ",
382 rio_table_ptr->ver_num, rio_table_ptr->scal_count, 382 rio_table_ptr->ver_num, rio_table_ptr->scal_count,
@@ -440,12 +440,12 @@ static int __init ebda_rio_table (void)
440 rio_detail_ptr->chassis_num = readb (io_mem + offset + 14); 440 rio_detail_ptr->chassis_num = readb (io_mem + offset + 14);
441// debug ("rio_node_id: %x\nbbar: %x\nrio_type: %x\nowner_id: %x\nport0_node: %x\nport0_port: %x\nport1_node: %x\nport1_port: %x\nfirst_slot_num: %x\nstatus: %x\n", rio_detail_ptr->rio_node_id, rio_detail_ptr->bbar, rio_detail_ptr->rio_type, rio_detail_ptr->owner_id, rio_detail_ptr->port0_node_connect, rio_detail_ptr->port0_port_connect, rio_detail_ptr->port1_node_connect, rio_detail_ptr->port1_port_connect, rio_detail_ptr->first_slot_num, rio_detail_ptr->status); 441// debug ("rio_node_id: %x\nbbar: %x\nrio_type: %x\nowner_id: %x\nport0_node: %x\nport0_port: %x\nport1_node: %x\nport1_port: %x\nfirst_slot_num: %x\nstatus: %x\n", rio_detail_ptr->rio_node_id, rio_detail_ptr->bbar, rio_detail_ptr->rio_type, rio_detail_ptr->owner_id, rio_detail_ptr->port0_node_connect, rio_detail_ptr->port0_port_connect, rio_detail_ptr->port1_node_connect, rio_detail_ptr->port1_port_connect, rio_detail_ptr->first_slot_num, rio_detail_ptr->status);
442 //create linked list of chassis 442 //create linked list of chassis
443 if (rio_detail_ptr->rio_type == 4 || rio_detail_ptr->rio_type == 5) 443 if (rio_detail_ptr->rio_type == 4 || rio_detail_ptr->rio_type == 5)
444 list_add (&rio_detail_ptr->rio_detail_list, &rio_vg_head); 444 list_add (&rio_detail_ptr->rio_detail_list, &rio_vg_head);
445 //create linked list of expansion box 445 //create linked list of expansion box
446 else if (rio_detail_ptr->rio_type == 6 || rio_detail_ptr->rio_type == 7) 446 else if (rio_detail_ptr->rio_type == 6 || rio_detail_ptr->rio_type == 7)
447 list_add (&rio_detail_ptr->rio_detail_list, &rio_lo_head); 447 list_add (&rio_detail_ptr->rio_detail_list, &rio_lo_head);
448 else 448 else
449 // not in my concern 449 // not in my concern
450 kfree (rio_detail_ptr); 450 kfree (rio_detail_ptr);
451 offset += 15; 451 offset += 15;
@@ -456,7 +456,7 @@ static int __init ebda_rio_table (void)
456} 456}
457 457
458/* 458/*
459 * reorganizing linked list of chassis 459 * reorganizing linked list of chassis
460 */ 460 */
461static struct opt_rio *search_opt_vg (u8 chassis_num) 461static struct opt_rio *search_opt_vg (u8 chassis_num)
462{ 462{
@@ -464,7 +464,7 @@ static struct opt_rio *search_opt_vg (u8 chassis_num)
464 list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) { 464 list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) {
465 if (ptr->chassis_num == chassis_num) 465 if (ptr->chassis_num == chassis_num)
466 return ptr; 466 return ptr;
467 } 467 }
468 return NULL; 468 return NULL;
469} 469}
470 470
@@ -472,7 +472,7 @@ static int __init combine_wpg_for_chassis (void)
472{ 472{
473 struct opt_rio *opt_rio_ptr = NULL; 473 struct opt_rio *opt_rio_ptr = NULL;
474 struct rio_detail *rio_detail_ptr = NULL; 474 struct rio_detail *rio_detail_ptr = NULL;
475 475
476 list_for_each_entry(rio_detail_ptr, &rio_vg_head, rio_detail_list) { 476 list_for_each_entry(rio_detail_ptr, &rio_vg_head, rio_detail_list) {
477 opt_rio_ptr = search_opt_vg (rio_detail_ptr->chassis_num); 477 opt_rio_ptr = search_opt_vg (rio_detail_ptr->chassis_num);
478 if (!opt_rio_ptr) { 478 if (!opt_rio_ptr) {
@@ -484,14 +484,14 @@ static int __init combine_wpg_for_chassis (void)
484 opt_rio_ptr->first_slot_num = rio_detail_ptr->first_slot_num; 484 opt_rio_ptr->first_slot_num = rio_detail_ptr->first_slot_num;
485 opt_rio_ptr->middle_num = rio_detail_ptr->first_slot_num; 485 opt_rio_ptr->middle_num = rio_detail_ptr->first_slot_num;
486 list_add (&opt_rio_ptr->opt_rio_list, &opt_vg_head); 486 list_add (&opt_rio_ptr->opt_rio_list, &opt_vg_head);
487 } else { 487 } else {
488 opt_rio_ptr->first_slot_num = min (opt_rio_ptr->first_slot_num, rio_detail_ptr->first_slot_num); 488 opt_rio_ptr->first_slot_num = min (opt_rio_ptr->first_slot_num, rio_detail_ptr->first_slot_num);
489 opt_rio_ptr->middle_num = max (opt_rio_ptr->middle_num, rio_detail_ptr->first_slot_num); 489 opt_rio_ptr->middle_num = max (opt_rio_ptr->middle_num, rio_detail_ptr->first_slot_num);
490 } 490 }
491 } 491 }
492 print_opt_vg (); 492 print_opt_vg ();
493 return 0; 493 return 0;
494} 494}
495 495
496/* 496/*
497 * reorganizing linked list of expansion box 497 * reorganizing linked list of expansion box
@@ -502,7 +502,7 @@ static struct opt_rio_lo *search_opt_lo (u8 chassis_num)
502 list_for_each_entry(ptr, &opt_lo_head, opt_rio_lo_list) { 502 list_for_each_entry(ptr, &opt_lo_head, opt_rio_lo_list) {
503 if (ptr->chassis_num == chassis_num) 503 if (ptr->chassis_num == chassis_num)
504 return ptr; 504 return ptr;
505 } 505 }
506 return NULL; 506 return NULL;
507} 507}
508 508
@@ -510,7 +510,7 @@ static int combine_wpg_for_expansion (void)
510{ 510{
511 struct opt_rio_lo *opt_rio_lo_ptr = NULL; 511 struct opt_rio_lo *opt_rio_lo_ptr = NULL;
512 struct rio_detail *rio_detail_ptr = NULL; 512 struct rio_detail *rio_detail_ptr = NULL;
513 513
514 list_for_each_entry(rio_detail_ptr, &rio_lo_head, rio_detail_list) { 514 list_for_each_entry(rio_detail_ptr, &rio_lo_head, rio_detail_list) {
515 opt_rio_lo_ptr = search_opt_lo (rio_detail_ptr->chassis_num); 515 opt_rio_lo_ptr = search_opt_lo (rio_detail_ptr->chassis_num);
516 if (!opt_rio_lo_ptr) { 516 if (!opt_rio_lo_ptr) {
@@ -522,22 +522,22 @@ static int combine_wpg_for_expansion (void)
522 opt_rio_lo_ptr->first_slot_num = rio_detail_ptr->first_slot_num; 522 opt_rio_lo_ptr->first_slot_num = rio_detail_ptr->first_slot_num;
523 opt_rio_lo_ptr->middle_num = rio_detail_ptr->first_slot_num; 523 opt_rio_lo_ptr->middle_num = rio_detail_ptr->first_slot_num;
524 opt_rio_lo_ptr->pack_count = 1; 524 opt_rio_lo_ptr->pack_count = 1;
525 525
526 list_add (&opt_rio_lo_ptr->opt_rio_lo_list, &opt_lo_head); 526 list_add (&opt_rio_lo_ptr->opt_rio_lo_list, &opt_lo_head);
527 } else { 527 } else {
528 opt_rio_lo_ptr->first_slot_num = min (opt_rio_lo_ptr->first_slot_num, rio_detail_ptr->first_slot_num); 528 opt_rio_lo_ptr->first_slot_num = min (opt_rio_lo_ptr->first_slot_num, rio_detail_ptr->first_slot_num);
529 opt_rio_lo_ptr->middle_num = max (opt_rio_lo_ptr->middle_num, rio_detail_ptr->first_slot_num); 529 opt_rio_lo_ptr->middle_num = max (opt_rio_lo_ptr->middle_num, rio_detail_ptr->first_slot_num);
530 opt_rio_lo_ptr->pack_count = 2; 530 opt_rio_lo_ptr->pack_count = 2;
531 } 531 }
532 } 532 }
533 return 0; 533 return 0;
534} 534}
535 535
536 536
537/* Since we don't know the max slot number per each chassis, hence go 537/* Since we don't know the max slot number per each chassis, hence go
538 * through the list of all chassis to find out the range 538 * through the list of all chassis to find out the range
539 * Arguments: slot_num, 1st slot number of the chassis we think we are on, 539 * Arguments: slot_num, 1st slot number of the chassis we think we are on,
540 * var (0 = chassis, 1 = expansion box) 540 * var (0 = chassis, 1 = expansion box)
541 */ 541 */
542static int first_slot_num (u8 slot_num, u8 first_slot, u8 var) 542static int first_slot_num (u8 slot_num, u8 first_slot, u8 var)
543{ 543{
@@ -547,7 +547,7 @@ static int first_slot_num (u8 slot_num, u8 first_slot, u8 var)
547 547
548 if (!var) { 548 if (!var) {
549 list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) { 549 list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) {
550 if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) { 550 if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) {
551 rc = -ENODEV; 551 rc = -ENODEV;
552 break; 552 break;
553 } 553 }
@@ -569,7 +569,7 @@ static struct opt_rio_lo * find_rxe_num (u8 slot_num)
569 569
570 list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) { 570 list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) {
571 //check to see if this slot_num belongs to expansion box 571 //check to see if this slot_num belongs to expansion box
572 if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1))) 572 if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1)))
573 return opt_lo_ptr; 573 return opt_lo_ptr;
574 } 574 }
575 return NULL; 575 return NULL;
@@ -580,8 +580,8 @@ static struct opt_rio * find_chassis_num (u8 slot_num)
580 struct opt_rio *opt_vg_ptr; 580 struct opt_rio *opt_vg_ptr;
581 581
582 list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) { 582 list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) {
583 //check to see if this slot_num belongs to chassis 583 //check to see if this slot_num belongs to chassis
584 if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0))) 584 if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0)))
585 return opt_vg_ptr; 585 return opt_vg_ptr;
586 } 586 }
587 return NULL; 587 return NULL;
@@ -594,13 +594,13 @@ static u8 calculate_first_slot (u8 slot_num)
594{ 594{
595 u8 first_slot = 1; 595 u8 first_slot = 1;
596 struct slot * slot_cur; 596 struct slot * slot_cur;
597 597
598 list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list) { 598 list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list) {
599 if (slot_cur->ctrl) { 599 if (slot_cur->ctrl) {
600 if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num)) 600 if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num))
601 first_slot = slot_cur->ctrl->ending_slot_num; 601 first_slot = slot_cur->ctrl->ending_slot_num;
602 } 602 }
603 } 603 }
604 return first_slot + 1; 604 return first_slot + 1;
605 605
606} 606}
@@ -622,11 +622,11 @@ static char *create_file_name (struct slot * slot_cur)
622 err ("Structure passed is empty\n"); 622 err ("Structure passed is empty\n");
623 return NULL; 623 return NULL;
624 } 624 }
625 625
626 slot_num = slot_cur->number; 626 slot_num = slot_cur->number;
627 627
628 memset (str, 0, sizeof(str)); 628 memset (str, 0, sizeof(str));
629 629
630 if (rio_table_ptr) { 630 if (rio_table_ptr) {
631 if (rio_table_ptr->ver_num == 3) { 631 if (rio_table_ptr->ver_num == 3) {
632 opt_vg_ptr = find_chassis_num (slot_num); 632 opt_vg_ptr = find_chassis_num (slot_num);
@@ -660,7 +660,7 @@ static char *create_file_name (struct slot * slot_cur)
660 /* if both NULL and we DO have correct RIO table in BIOS */ 660 /* if both NULL and we DO have correct RIO table in BIOS */
661 return NULL; 661 return NULL;
662 } 662 }
663 } 663 }
664 if (!flag) { 664 if (!flag) {
665 if (slot_cur->ctrl->ctlr_type == 4) { 665 if (slot_cur->ctrl->ctlr_type == 4) {
666 first_slot = calculate_first_slot (slot_num); 666 first_slot = calculate_first_slot (slot_num);
@@ -798,7 +798,7 @@ static int __init ebda_rsrc_controller (void)
798 slot_ptr->ctl_index = readb (io_mem + addr_slot + 2*slot_num); 798 slot_ptr->ctl_index = readb (io_mem + addr_slot + 2*slot_num);
799 slot_ptr->slot_cap = readb (io_mem + addr_slot + 3*slot_num); 799 slot_ptr->slot_cap = readb (io_mem + addr_slot + 3*slot_num);
800 800
801 // create bus_info lined list --- if only one slot per bus: slot_min = slot_max 801 // create bus_info lined list --- if only one slot per bus: slot_min = slot_max
802 802
803 bus_info_ptr2 = ibmphp_find_same_bus_num (slot_ptr->slot_bus_num); 803 bus_info_ptr2 = ibmphp_find_same_bus_num (slot_ptr->slot_bus_num);
804 if (!bus_info_ptr2) { 804 if (!bus_info_ptr2) {
@@ -814,9 +814,9 @@ static int __init ebda_rsrc_controller (void)
814 bus_info_ptr1->index = bus_index++; 814 bus_info_ptr1->index = bus_index++;
815 bus_info_ptr1->current_speed = 0xff; 815 bus_info_ptr1->current_speed = 0xff;
816 bus_info_ptr1->current_bus_mode = 0xff; 816 bus_info_ptr1->current_bus_mode = 0xff;
817 817
818 bus_info_ptr1->controller_id = hpc_ptr->ctlr_id; 818 bus_info_ptr1->controller_id = hpc_ptr->ctlr_id;
819 819
820 list_add_tail (&bus_info_ptr1->bus_info_list, &bus_info_head); 820 list_add_tail (&bus_info_ptr1->bus_info_list, &bus_info_head);
821 821
822 } else { 822 } else {
@@ -851,7 +851,7 @@ static int __init ebda_rsrc_controller (void)
851 bus_info_ptr2->slots_at_66_conv = bus_ptr->slots_at_66_conv; 851 bus_info_ptr2->slots_at_66_conv = bus_ptr->slots_at_66_conv;
852 bus_info_ptr2->slots_at_66_pcix = bus_ptr->slots_at_66_pcix; 852 bus_info_ptr2->slots_at_66_pcix = bus_ptr->slots_at_66_pcix;
853 bus_info_ptr2->slots_at_100_pcix = bus_ptr->slots_at_100_pcix; 853 bus_info_ptr2->slots_at_100_pcix = bus_ptr->slots_at_100_pcix;
854 bus_info_ptr2->slots_at_133_pcix = bus_ptr->slots_at_133_pcix; 854 bus_info_ptr2->slots_at_133_pcix = bus_ptr->slots_at_133_pcix;
855 } 855 }
856 bus_ptr++; 856 bus_ptr++;
857 } 857 }
@@ -864,7 +864,7 @@ static int __init ebda_rsrc_controller (void)
864 hpc_ptr->u.pci_ctlr.dev_fun = readb (io_mem + addr + 1); 864 hpc_ptr->u.pci_ctlr.dev_fun = readb (io_mem + addr + 1);
865 hpc_ptr->irq = readb (io_mem + addr + 2); 865 hpc_ptr->irq = readb (io_mem + addr + 2);
866 addr += 3; 866 addr += 3;
867 debug ("ctrl bus = %x, ctlr devfun = %x, irq = %x\n", 867 debug ("ctrl bus = %x, ctlr devfun = %x, irq = %x\n",
868 hpc_ptr->u.pci_ctlr.bus, 868 hpc_ptr->u.pci_ctlr.bus,
869 hpc_ptr->u.pci_ctlr.dev_fun, hpc_ptr->irq); 869 hpc_ptr->u.pci_ctlr.dev_fun, hpc_ptr->irq);
870 break; 870 break;
@@ -932,7 +932,7 @@ static int __init ebda_rsrc_controller (void)
932 tmp_slot->supported_speed = 2; 932 tmp_slot->supported_speed = 2;
933 else if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_66_MAX) == EBDA_SLOT_66_MAX) 933 else if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_66_MAX) == EBDA_SLOT_66_MAX)
934 tmp_slot->supported_speed = 1; 934 tmp_slot->supported_speed = 1;
935 935
936 if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_PCIX_CAP) == EBDA_SLOT_PCIX_CAP) 936 if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_PCIX_CAP) == EBDA_SLOT_PCIX_CAP)
937 tmp_slot->supported_bus_mode = 1; 937 tmp_slot->supported_bus_mode = 1;
938 else 938 else
@@ -1000,7 +1000,7 @@ error_no_hpc:
1000 return rc; 1000 return rc;
1001} 1001}
1002 1002
1003/* 1003/*
1004 * map info (bus, devfun, start addr, end addr..) of i/o, memory, 1004 * map info (bus, devfun, start addr, end addr..) of i/o, memory,
1005 * pfm from the physical addr to a list of resource. 1005 * pfm from the physical addr to a list of resource.
1006 */ 1006 */
@@ -1057,7 +1057,7 @@ static int __init ebda_rsrc_rsrc (void)
1057 addr += 10; 1057 addr += 10;
1058 1058
1059 debug ("rsrc from mem or pfm ---\n"); 1059 debug ("rsrc from mem or pfm ---\n");
1060 debug ("rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n", 1060 debug ("rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
1061 rsrc_ptr->rsrc_type, rsrc_ptr->bus_num, rsrc_ptr->dev_fun, rsrc_ptr->start_addr, rsrc_ptr->end_addr); 1061 rsrc_ptr->rsrc_type, rsrc_ptr->bus_num, rsrc_ptr->dev_fun, rsrc_ptr->start_addr, rsrc_ptr->end_addr);
1062 1062
1063 list_add (&rsrc_ptr->ebda_pci_rsrc_list, &ibmphp_ebda_pci_rsrc_head); 1063 list_add (&rsrc_ptr->ebda_pci_rsrc_list, &ibmphp_ebda_pci_rsrc_head);
@@ -1096,7 +1096,7 @@ struct bus_info *ibmphp_find_same_bus_num (u32 num)
1096 struct bus_info *ptr; 1096 struct bus_info *ptr;
1097 1097
1098 list_for_each_entry(ptr, &bus_info_head, bus_info_list) { 1098 list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
1099 if (ptr->busno == num) 1099 if (ptr->busno == num)
1100 return ptr; 1100 return ptr;
1101 } 1101 }
1102 return NULL; 1102 return NULL;
@@ -1110,7 +1110,7 @@ int ibmphp_get_bus_index (u8 num)
1110 struct bus_info *ptr; 1110 struct bus_info *ptr;
1111 1111
1112 list_for_each_entry(ptr, &bus_info_head, bus_info_list) { 1112 list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
1113 if (ptr->busno == num) 1113 if (ptr->busno == num)
1114 return ptr->index; 1114 return ptr->index;
1115 } 1115 }
1116 return -ENODEV; 1116 return -ENODEV;
@@ -1168,7 +1168,7 @@ static struct pci_device_id id_table[] = {
1168 .subdevice = HPC_SUBSYSTEM_ID, 1168 .subdevice = HPC_SUBSYSTEM_ID,
1169 .class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00), 1169 .class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00),
1170 }, {} 1170 }, {}
1171}; 1171};
1172 1172
1173MODULE_DEVICE_TABLE(pci, id_table); 1173MODULE_DEVICE_TABLE(pci, id_table);
1174 1174
@@ -1197,7 +1197,7 @@ static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids)
1197 struct controller *ctrl; 1197 struct controller *ctrl;
1198 1198
1199 debug ("inside ibmphp_probe\n"); 1199 debug ("inside ibmphp_probe\n");
1200 1200
1201 list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) { 1201 list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) {
1202 if (ctrl->ctlr_type == 1) { 1202 if (ctrl->ctlr_type == 1) {
1203 if ((dev->devfn == ctrl->u.pci_ctlr.dev_fun) && (dev->bus->number == ctrl->u.pci_ctlr.bus)) { 1203 if ((dev->devfn == ctrl->u.pci_ctlr.dev_fun) && (dev->bus->number == ctrl->u.pci_ctlr.bus)) {
@@ -1210,4 +1210,3 @@ static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids)
1210 } 1210 }
1211 return -ENODEV; 1211 return -ENODEV;
1212} 1212}
1213
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index f59ed30512b5..5fc7a089f532 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -258,7 +258,7 @@ static u8 i2c_ctrl_write (struct controller *ctlr_ptr, void __iomem *WPGBbar, u8
258{ 258{
259 u8 rc; 259 u8 rc;
260 void __iomem *wpg_addr; // base addr + offset 260 void __iomem *wpg_addr; // base addr + offset
261 unsigned long wpg_data; // data to/from WPG LOHI format 261 unsigned long wpg_data; // data to/from WPG LOHI format
262 unsigned long ultemp; 262 unsigned long ultemp;
263 unsigned long data; // actual data HILO format 263 unsigned long data; // actual data HILO format
264 int i; 264 int i;
@@ -351,7 +351,7 @@ static u8 i2c_ctrl_write (struct controller *ctlr_ptr, void __iomem *WPGBbar, u8
351} 351}
352 352
353//------------------------------------------------------------ 353//------------------------------------------------------------
354// Read from ISA type HPC 354// Read from ISA type HPC
355//------------------------------------------------------------ 355//------------------------------------------------------------
356static u8 isa_ctrl_read (struct controller *ctlr_ptr, u8 offset) 356static u8 isa_ctrl_read (struct controller *ctlr_ptr, u8 offset)
357{ 357{
@@ -372,7 +372,7 @@ static void isa_ctrl_write (struct controller *ctlr_ptr, u8 offset, u8 data)
372{ 372{
373 u16 start_address; 373 u16 start_address;
374 u16 port_address; 374 u16 port_address;
375 375
376 start_address = ctlr_ptr->u.isa_ctlr.io_start; 376 start_address = ctlr_ptr->u.isa_ctlr.io_start;
377 port_address = start_address + (u16) offset; 377 port_address = start_address + (u16) offset;
378 outb (data, port_address); 378 outb (data, port_address);
@@ -656,11 +656,11 @@ int ibmphp_hpc_readslot (struct slot * pslot, u8 cmd, u8 * pstatus)
656 //-------------------------------------------------------------------- 656 //--------------------------------------------------------------------
657 // cleanup 657 // cleanup
658 //-------------------------------------------------------------------- 658 //--------------------------------------------------------------------
659 659
660 // remove physical to logical address mapping 660 // remove physical to logical address mapping
661 if ((ctlr_ptr->ctlr_type == 2) || (ctlr_ptr->ctlr_type == 4)) 661 if ((ctlr_ptr->ctlr_type == 2) || (ctlr_ptr->ctlr_type == 4))
662 iounmap (wpg_bbar); 662 iounmap (wpg_bbar);
663 663
664 free_hpc_access (); 664 free_hpc_access ();
665 665
666 debug_polling ("%s - Exit rc[%d]\n", __func__, rc); 666 debug_polling ("%s - Exit rc[%d]\n", __func__, rc);
@@ -835,7 +835,7 @@ static int poll_hpc(void *data)
835 down (&semOperations); 835 down (&semOperations);
836 836
837 switch (poll_state) { 837 switch (poll_state) {
838 case POLL_LATCH_REGISTER: 838 case POLL_LATCH_REGISTER:
839 oldlatchlow = curlatchlow; 839 oldlatchlow = curlatchlow;
840 ctrl_count = 0x00; 840 ctrl_count = 0x00;
841 list_for_each (pslotlist, &ibmphp_slot_head) { 841 list_for_each (pslotlist, &ibmphp_slot_head) {
@@ -892,16 +892,16 @@ static int poll_hpc(void *data)
892 892
893 if (kthread_should_stop()) 893 if (kthread_should_stop())
894 goto out_sleep; 894 goto out_sleep;
895 895
896 down (&semOperations); 896 down (&semOperations);
897 897
898 if (poll_count >= POLL_LATCH_CNT) { 898 if (poll_count >= POLL_LATCH_CNT) {
899 poll_count = 0; 899 poll_count = 0;
900 poll_state = POLL_SLOTS; 900 poll_state = POLL_SLOTS;
901 } else 901 } else
902 poll_state = POLL_LATCH_REGISTER; 902 poll_state = POLL_LATCH_REGISTER;
903 break; 903 break;
904 } 904 }
905 /* give up the hardware semaphore */ 905 /* give up the hardware semaphore */
906 up (&semOperations); 906 up (&semOperations);
907 /* sleep for a short time just for good measure */ 907 /* sleep for a short time just for good measure */
@@ -958,7 +958,7 @@ static int process_changeinstatus (struct slot *pslot, struct slot *poldslot)
958 // bit 5 - HPC_SLOT_PWRGD 958 // bit 5 - HPC_SLOT_PWRGD
959 if ((pslot->status & 0x20) != (poldslot->status & 0x20)) 959 if ((pslot->status & 0x20) != (poldslot->status & 0x20))
960 // OFF -> ON: ignore, ON -> OFF: disable slot 960 // OFF -> ON: ignore, ON -> OFF: disable slot
961 if ((poldslot->status & 0x20) && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status))) 961 if ((poldslot->status & 0x20) && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status)))
962 disable = 1; 962 disable = 1;
963 963
964 // bit 6 - HPC_SLOT_BUS_SPEED 964 // bit 6 - HPC_SLOT_BUS_SPEED
@@ -980,7 +980,7 @@ static int process_changeinstatus (struct slot *pslot, struct slot *poldslot)
980 pslot->status &= ~HPC_SLOT_POWER; 980 pslot->status &= ~HPC_SLOT_POWER;
981 } 981 }
982 } 982 }
983 // CLOSE -> OPEN 983 // CLOSE -> OPEN
984 else if ((SLOT_PWRGD (poldslot->status) == HPC_SLOT_PWRGD_GOOD) 984 else if ((SLOT_PWRGD (poldslot->status) == HPC_SLOT_PWRGD_GOOD)
985 && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status))) { 985 && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status))) {
986 disable = 1; 986 disable = 1;
@@ -1075,7 +1075,7 @@ void __exit ibmphp_hpc_stop_poll_thread (void)
1075 debug ("before locking operations \n"); 1075 debug ("before locking operations \n");
1076 ibmphp_lock_operations (); 1076 ibmphp_lock_operations ();
1077 debug ("after locking operations \n"); 1077 debug ("after locking operations \n");
1078 1078
1079 // wait for poll thread to exit 1079 // wait for poll thread to exit
1080 debug ("before sem_exit down \n"); 1080 debug ("before sem_exit down \n");
1081 down (&sem_exit); 1081 down (&sem_exit);
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index c60f5f3e838d..639ea3a75e14 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * IBM Hot Plug Controller Driver 2 * IBM Hot Plug Controller Driver
3 * 3 *
4 * Written By: Irene Zubarev, IBM Corporation 4 * Written By: Irene Zubarev, IBM Corporation
5 * 5 *
6 * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) 6 * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
7 * Copyright (C) 2001,2002 IBM Corp. 7 * Copyright (C) 2001,2002 IBM Corp.
8 * 8 *
@@ -42,7 +42,7 @@ static u8 find_sec_number (u8 primary_busno, u8 slotno);
42 42
43/* 43/*
44 * NOTE..... If BIOS doesn't provide default routing, we assign: 44 * NOTE..... If BIOS doesn't provide default routing, we assign:
45 * 9 for SCSI, 10 for LAN adapters, and 11 for everything else. 45 * 9 for SCSI, 10 for LAN adapters, and 11 for everything else.
46 * If adapter is bridged, then we assign 11 to it and devices behind it. 46 * If adapter is bridged, then we assign 11 to it and devices behind it.
47 * We also assign the same irq numbers for multi function devices. 47 * We also assign the same irq numbers for multi function devices.
48 * These are PIC mode, so shouldn't matter n.e.ways (hopefully) 48 * These are PIC mode, so shouldn't matter n.e.ways (hopefully)
@@ -71,11 +71,11 @@ static void assign_alt_irq (struct pci_func * cur_func, u8 class_code)
71 * Configures the device to be added (will allocate needed resources if it 71 * Configures the device to be added (will allocate needed resources if it
72 * can), the device can be a bridge or a regular pci device, can also be 72 * can), the device can be a bridge or a regular pci device, can also be
73 * multi-functional 73 * multi-functional
74 * 74 *
75 * Input: function to be added 75 * Input: function to be added
76 * 76 *
77 * TO DO: The error case with Multifunction device or multi function bridge, 77 * TO DO: The error case with Multifunction device or multi function bridge,
78 * if there is an error, will need to go through all previous functions and 78 * if there is an error, will need to go through all previous functions and
79 * unconfigure....or can add some code into unconfigure_card.... 79 * unconfigure....or can add some code into unconfigure_card....
80 */ 80 */
81int ibmphp_configure_card (struct pci_func *func, u8 slotno) 81int ibmphp_configure_card (struct pci_func *func, u8 slotno)
@@ -98,7 +98,7 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
98 cur_func = func; 98 cur_func = func;
99 99
100 /* We only get bus and device from IRQ routing table. So at this point, 100 /* We only get bus and device from IRQ routing table. So at this point,
101 * func->busno is correct, and func->device contains only device (at the 5 101 * func->busno is correct, and func->device contains only device (at the 5
102 * highest bits) 102 * highest bits)
103 */ 103 */
104 104
@@ -151,7 +151,7 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
151 cur_func->device, cur_func->busno); 151 cur_func->device, cur_func->busno);
152 cleanup_count = 6; 152 cleanup_count = 6;
153 goto error; 153 goto error;
154 } 154 }
155 cur_func->next = NULL; 155 cur_func->next = NULL;
156 function = 0x8; 156 function = 0x8;
157 break; 157 break;
@@ -339,7 +339,7 @@ error:
339} 339}
340 340
341/* 341/*
342 * This function configures the pci BARs of a single device. 342 * This function configures the pci BARs of a single device.
343 * Input: pointer to the pci_func 343 * Input: pointer to the pci_func
344 * Output: configured PCI, 0, or error 344 * Output: configured PCI, 0, or error
345 */ 345 */
@@ -371,17 +371,17 @@ static int configure_device (struct pci_func *func)
371 371
372 for (count = 0; address[count]; count++) { /* for 6 BARs */ 372 for (count = 0; address[count]; count++) { /* for 6 BARs */
373 373
374 /* not sure if i need this. per scott, said maybe need smth like this 374 /* not sure if i need this. per scott, said maybe need * something like this
375 if devices don't adhere 100% to the spec, so don't want to write 375 if devices don't adhere 100% to the spec, so don't want to write
376 to the reserved bits 376 to the reserved bits
377 377
378 pcibios_read_config_byte(cur_func->busno, cur_func->device, 378 pcibios_read_config_byte(cur_func->busno, cur_func->device,
379 PCI_BASE_ADDRESS_0 + 4 * count, &tmp); 379 PCI_BASE_ADDRESS_0 + 4 * count, &tmp);
380 if (tmp & 0x01) // IO 380 if (tmp & 0x01) // IO
381 pcibios_write_config_dword(cur_func->busno, cur_func->device, 381 pcibios_write_config_dword(cur_func->busno, cur_func->device,
382 PCI_BASE_ADDRESS_0 + 4 * count, 0xFFFFFFFD); 382 PCI_BASE_ADDRESS_0 + 4 * count, 0xFFFFFFFD);
383 else // Memory 383 else // Memory
384 pcibios_write_config_dword(cur_func->busno, cur_func->device, 384 pcibios_write_config_dword(cur_func->busno, cur_func->device,
385 PCI_BASE_ADDRESS_0 + 4 * count, 0xFFFFFFFF); 385 PCI_BASE_ADDRESS_0 + 4 * count, 0xFFFFFFFF);
386 */ 386 */
387 pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], 0xFFFFFFFF); 387 pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], 0xFFFFFFFF);
@@ -421,8 +421,8 @@ static int configure_device (struct pci_func *func)
421 return -EIO; 421 return -EIO;
422 } 422 }
423 pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->io[count]->start); 423 pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->io[count]->start);
424 424
425 /* _______________This is for debugging purposes only_____________________ */ 425 /* _______________This is for debugging purposes only_____________________ */
426 debug ("b4 writing, the IO address is %x\n", func->io[count]->start); 426 debug ("b4 writing, the IO address is %x\n", func->io[count]->start);
427 pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &bar[count]); 427 pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &bar[count]);
428 debug ("after writing.... the start address is %x\n", bar[count]); 428 debug ("after writing.... the start address is %x\n", bar[count]);
@@ -484,7 +484,7 @@ static int configure_device (struct pci_func *func)
484 484
485 pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->pfmem[count]->start); 485 pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->pfmem[count]->start);
486 486
487 /*_______________This is for debugging purposes only______________________________*/ 487 /*_______________This is for debugging purposes only______________________________*/
488 debug ("b4 writing, start address is %x\n", func->pfmem[count]->start); 488 debug ("b4 writing, start address is %x\n", func->pfmem[count]->start);
489 pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &bar[count]); 489 pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &bar[count]);
490 debug ("after writing, start address is %x\n", bar[count]); 490 debug ("after writing, start address is %x\n", bar[count]);
@@ -559,7 +559,7 @@ static int configure_device (struct pci_func *func)
559/****************************************************************************** 559/******************************************************************************
560 * This routine configures a PCI-2-PCI bridge and the functions behind it 560 * This routine configures a PCI-2-PCI bridge and the functions behind it
561 * Parameters: pci_func 561 * Parameters: pci_func
562 * Returns: 562 * Returns:
563 ******************************************************************************/ 563 ******************************************************************************/
564static int configure_bridge (struct pci_func **func_passed, u8 slotno) 564static int configure_bridge (struct pci_func **func_passed, u8 slotno)
565{ 565{
@@ -622,7 +622,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
622 debug ("AFTER FIND_SEC_NUMBER, func->busno IS %x\n", func->busno); 622 debug ("AFTER FIND_SEC_NUMBER, func->busno IS %x\n", func->busno);
623 623
624 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, sec_number); 624 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, sec_number);
625 625
626 /* __________________For debugging purposes only __________________________________ 626 /* __________________For debugging purposes only __________________________________
627 pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_number); 627 pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_number);
628 debug ("sec_number after write/read is %x\n", sec_number); 628 debug ("sec_number after write/read is %x\n", sec_number);
@@ -644,7 +644,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
644 644
645 645
646 /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 646 /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
647 !!!!!!!!!!!!!!!NEED TO ADD!!! FAST BACK-TO-BACK ENABLE!!!!!!!!!!!!!!!!!!!! 647 !!!!!!!!!!!!!!!NEED TO ADD!!! FAST BACK-TO-BACK ENABLE!!!!!!!!!!!!!!!!!!!!
648 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/ 648 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/
649 649
650 650
@@ -670,7 +670,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
670 debug ("len[count] in IO = %x\n", len[count]); 670 debug ("len[count] in IO = %x\n", len[count]);
671 671
672 bus_io[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); 672 bus_io[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
673 673
674 if (!bus_io[count]) { 674 if (!bus_io[count]) {
675 err ("out of system memory\n"); 675 err ("out of system memory\n");
676 retval = -ENOMEM; 676 retval = -ENOMEM;
@@ -735,7 +735,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
735 ibmphp_add_pfmem_from_mem (bus_pfmem[count]); 735 ibmphp_add_pfmem_from_mem (bus_pfmem[count]);
736 func->pfmem[count] = bus_pfmem[count]; 736 func->pfmem[count] = bus_pfmem[count];
737 } else { 737 } else {
738 err ("cannot allocate requested pfmem for bus %x, device %x, len %x\n", 738 err ("cannot allocate requested pfmem for bus %x, device %x, len %x\n",
739 func->busno, func->device, len[count]); 739 func->busno, func->device, len[count]);
740 kfree (mem_tmp); 740 kfree (mem_tmp);
741 kfree (bus_pfmem[count]); 741 kfree (bus_pfmem[count]);
@@ -805,7 +805,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
805 debug ("amount_needed->mem = %x\n", amount_needed->mem); 805 debug ("amount_needed->mem = %x\n", amount_needed->mem);
806 debug ("amount_needed->pfmem = %x\n", amount_needed->pfmem); 806 debug ("amount_needed->pfmem = %x\n", amount_needed->pfmem);
807 807
808 if (amount_needed->not_correct) { 808 if (amount_needed->not_correct) {
809 debug ("amount_needed is not correct\n"); 809 debug ("amount_needed is not correct\n");
810 for (count = 0; address[count]; count++) { 810 for (count = 0; address[count]; count++) {
811 /* for 2 BARs */ 811 /* for 2 BARs */
@@ -830,7 +830,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
830 } else { 830 } else {
831 debug ("it wants %x IO behind the bridge\n", amount_needed->io); 831 debug ("it wants %x IO behind the bridge\n", amount_needed->io);
832 io = kzalloc(sizeof(*io), GFP_KERNEL); 832 io = kzalloc(sizeof(*io), GFP_KERNEL);
833 833
834 if (!io) { 834 if (!io) {
835 err ("out of system memory\n"); 835 err ("out of system memory\n");
836 retval = -ENOMEM; 836 retval = -ENOMEM;
@@ -959,7 +959,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
959 959
960 if (bus->noIORanges) { 960 if (bus->noIORanges) {
961 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_BASE, 0x00 | bus->rangeIO->start >> 8); 961 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_BASE, 0x00 | bus->rangeIO->start >> 8);
962 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_LIMIT, 0x00 | bus->rangeIO->end >> 8); 962 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_LIMIT, 0x00 | bus->rangeIO->end >> 8);
963 963
964 /* _______________This is for debugging purposes only ____________________ 964 /* _______________This is for debugging purposes only ____________________
965 pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_IO_BASE, &temp); 965 pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_IO_BASE, &temp);
@@ -980,7 +980,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
980 if (bus->noMemRanges) { 980 if (bus->noMemRanges) {
981 pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, 0x0000 | bus->rangeMem->start >> 16); 981 pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, 0x0000 | bus->rangeMem->start >> 16);
982 pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, 0x0000 | bus->rangeMem->end >> 16); 982 pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, 0x0000 | bus->rangeMem->end >> 16);
983 983
984 /* ____________________This is for debugging purposes only ________________________ 984 /* ____________________This is for debugging purposes only ________________________
985 pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, &temp); 985 pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, &temp);
986 debug ("mem_base = %x\n", (temp & PCI_MEMORY_RANGE_TYPE_MASK) << 16); 986 debug ("mem_base = %x\n", (temp & PCI_MEMORY_RANGE_TYPE_MASK) << 16);
@@ -1017,7 +1017,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
1017 pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_INTERRUPT_PIN, &irq); 1017 pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_INTERRUPT_PIN, &irq);
1018 if ((irq > 0x00) && (irq < 0x05)) 1018 if ((irq > 0x00) && (irq < 0x05))
1019 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_INTERRUPT_LINE, func->irq[irq - 1]); 1019 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_INTERRUPT_LINE, func->irq[irq - 1]);
1020 /* 1020 /*
1021 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, ctrl); 1021 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, ctrl);
1022 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_PARITY); 1022 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_PARITY);
1023 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_SERR); 1023 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_SERR);
@@ -1071,7 +1071,7 @@ error:
1071 * This function adds up the amount of resources needed behind the PPB bridge 1071 * This function adds up the amount of resources needed behind the PPB bridge
1072 * and passes it to the configure_bridge function 1072 * and passes it to the configure_bridge function
1073 * Input: bridge function 1073 * Input: bridge function
1074 * Ouput: amount of resources needed 1074 * Output: amount of resources needed
1075 *****************************************************************************/ 1075 *****************************************************************************/
1076static struct res_needed *scan_behind_bridge (struct pci_func * func, u8 busno) 1076static struct res_needed *scan_behind_bridge (struct pci_func * func, u8 busno)
1077{ 1077{
@@ -1204,9 +1204,9 @@ static struct res_needed *scan_behind_bridge (struct pci_func * func, u8 busno)
1204 return amount; 1204 return amount;
1205} 1205}
1206 1206
1207/* The following 3 unconfigure_boot_ routines deal with the case when we had the card 1207/* The following 3 unconfigure_boot_ routines deal with the case when we had the card
1208 * upon bootup in the system, since we don't allocate func to such case, we need to read 1208 * upon bootup in the system, since we don't allocate func to such case, we need to read
1209 * the start addresses from pci config space and then find the corresponding entries in 1209 * the start addresses from pci config space and then find the corresponding entries in
1210 * our resource lists. The functions return either 0, -ENODEV, or -1 (general failure) 1210 * our resource lists. The functions return either 0, -ENODEV, or -1 (general failure)
1211 * Change: we also call these functions even if we configured the card ourselves (i.e., not 1211 * Change: we also call these functions even if we configured the card ourselves (i.e., not
1212 * the bootup case), since it should work same way 1212 * the bootup case), since it should work same way
@@ -1561,8 +1561,8 @@ static int unconfigure_boot_card (struct slot *slot_cur)
1561 * unconfiguring the device 1561 * unconfiguring the device
1562 * TO DO: will probably need to add some code in case there was some resource, 1562 * TO DO: will probably need to add some code in case there was some resource,
1563 * to remove it... this is from when we have errors in the configure_card... 1563 * to remove it... this is from when we have errors in the configure_card...
1564 * !!!!!!!!!!!!!!!!!!!!!!!!!FOR BUSES!!!!!!!!!!!! 1564 * !!!!!!!!!!!!!!!!!!!!!!!!!FOR BUSES!!!!!!!!!!!!
1565 * Returns: 0, -1, -ENODEV 1565 * Returns: 0, -1, -ENODEV
1566 */ 1566 */
1567int ibmphp_unconfigure_card (struct slot **slot_cur, int the_end) 1567int ibmphp_unconfigure_card (struct slot **slot_cur, int the_end)
1568{ 1568{
@@ -1634,7 +1634,7 @@ int ibmphp_unconfigure_card (struct slot **slot_cur, int the_end)
1634 * Input: bus and the amount of resources needed (we know we can assign those, 1634 * Input: bus and the amount of resources needed (we know we can assign those,
1635 * since they've been checked already 1635 * since they've been checked already
1636 * Output: bus added to the correct spot 1636 * Output: bus added to the correct spot
1637 * 0, -1, error 1637 * 0, -1, error
1638 */ 1638 */
1639static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct resource_node *mem, struct resource_node *pfmem, u8 parent_busno) 1639static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct resource_node *mem, struct resource_node *pfmem, u8 parent_busno)
1640{ 1640{
@@ -1650,7 +1650,7 @@ static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct r
1650 err ("strange, cannot find bus which is supposed to be at the system... something is terribly wrong...\n"); 1650 err ("strange, cannot find bus which is supposed to be at the system... something is terribly wrong...\n");
1651 return -ENODEV; 1651 return -ENODEV;
1652 } 1652 }
1653 1653
1654 list_add (&bus->bus_list, &cur_bus->bus_list); 1654 list_add (&bus->bus_list, &cur_bus->bus_list);
1655 } 1655 }
1656 if (io) { 1656 if (io) {
@@ -1679,7 +1679,7 @@ static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct r
1679 } 1679 }
1680 if (pfmem) { 1680 if (pfmem) {
1681 pfmem_range = kzalloc(sizeof(*pfmem_range), GFP_KERNEL); 1681 pfmem_range = kzalloc(sizeof(*pfmem_range), GFP_KERNEL);
1682 if (!pfmem_range) { 1682 if (!pfmem_range) {
1683 err ("out of system memory\n"); 1683 err ("out of system memory\n");
1684 return -ENOMEM; 1684 return -ENOMEM;
1685 } 1685 }
@@ -1726,4 +1726,3 @@ static u8 find_sec_number (u8 primary_busno, u8 slotno)
1726 return busno; 1726 return busno;
1727 return 0xff; 1727 return 0xff;
1728} 1728}
1729
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
index e2dc289f767c..a265acb2d518 100644
--- a/drivers/pci/hotplug/ibmphp_res.c
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -72,7 +72,7 @@ static struct bus_node * __init alloc_error_bus (struct ebda_pci_rsrc * curr, u8
72static struct resource_node * __init alloc_resources (struct ebda_pci_rsrc * curr) 72static struct resource_node * __init alloc_resources (struct ebda_pci_rsrc * curr)
73{ 73{
74 struct resource_node *rs; 74 struct resource_node *rs;
75 75
76 if (!curr) { 76 if (!curr) {
77 err ("NULL passed to allocate\n"); 77 err ("NULL passed to allocate\n");
78 return NULL; 78 return NULL;
@@ -128,7 +128,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node
128 } 128 }
129 newrange->start = curr->start_addr; 129 newrange->start = curr->start_addr;
130 newrange->end = curr->end_addr; 130 newrange->end = curr->end_addr;
131 131
132 if (first_bus || (!num_ranges)) 132 if (first_bus || (!num_ranges))
133 newrange->rangeno = 1; 133 newrange->rangeno = 1;
134 else { 134 else {
@@ -162,7 +162,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node
162 newbus->rangePFMem = newrange; 162 newbus->rangePFMem = newrange;
163 if (first_bus) 163 if (first_bus)
164 newbus->noPFMemRanges = 1; 164 newbus->noPFMemRanges = 1;
165 else { 165 else {
166 debug ("1st PFMemory Primary on Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); 166 debug ("1st PFMemory Primary on Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
167 ++newbus->noPFMemRanges; 167 ++newbus->noPFMemRanges;
168 fix_resources (newbus); 168 fix_resources (newbus);
@@ -190,7 +190,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node
190 * This is the Resource Management initialization function. It will go through 190 * This is the Resource Management initialization function. It will go through
191 * the Resource list taken from EBDA and fill in this module's data structures 191 * the Resource list taken from EBDA and fill in this module's data structures
192 * 192 *
193 * THIS IS NOT TAKING INTO CONSIDERATION IO RESTRICTIONS OF PRIMARY BUSES, 193 * THIS IS NOT TAKING INTO CONSIDERATION IO RESTRICTIONS OF PRIMARY BUSES,
194 * SINCE WE'RE GOING TO ASSUME FOR NOW WE DON'T HAVE THOSE ON OUR BUSES FOR NOW 194 * SINCE WE'RE GOING TO ASSUME FOR NOW WE DON'T HAVE THOSE ON OUR BUSES FOR NOW
195 * 195 *
196 * Input: ptr to the head of the resource list from EBDA 196 * Input: ptr to the head of the resource list from EBDA
@@ -382,7 +382,7 @@ int __init ibmphp_rsrc_init (void)
382 * pci devices' resources for the appropriate resource 382 * pci devices' resources for the appropriate resource
383 * 383 *
384 * Input: type of the resource, range to add, current bus 384 * Input: type of the resource, range to add, current bus
385 * Output: 0 or -1, bus and range ptrs 385 * Output: 0 or -1, bus and range ptrs
386 ********************************************************************************/ 386 ********************************************************************************/
387static int add_bus_range (int type, struct range_node *range, struct bus_node *bus_cur) 387static int add_bus_range (int type, struct range_node *range, struct bus_node *bus_cur)
388{ 388{
@@ -466,7 +466,7 @@ static void update_resources (struct bus_node *bus_cur, int type, int rangeno)
466 466
467 switch (type) { 467 switch (type) {
468 case MEM: 468 case MEM:
469 if (bus_cur->firstMem) 469 if (bus_cur->firstMem)
470 res = bus_cur->firstMem; 470 res = bus_cur->firstMem;
471 break; 471 break;
472 case PFMEM: 472 case PFMEM:
@@ -583,7 +583,7 @@ static void fix_resources (struct bus_node *bus_cur)
583} 583}
584 584
585/******************************************************************************* 585/*******************************************************************************
586 * This routine adds a resource to the list of resources to the appropriate bus 586 * This routine adds a resource to the list of resources to the appropriate bus
587 * based on their resource type and sorted by their starting addresses. It assigns 587 * based on their resource type and sorted by their starting addresses. It assigns
588 * the ptrs to next and nextRange if needed. 588 * the ptrs to next and nextRange if needed.
589 * 589 *
@@ -605,11 +605,11 @@ int ibmphp_add_resource (struct resource_node *res)
605 err ("NULL passed to add\n"); 605 err ("NULL passed to add\n");
606 return -ENODEV; 606 return -ENODEV;
607 } 607 }
608 608
609 bus_cur = find_bus_wprev (res->busno, NULL, 0); 609 bus_cur = find_bus_wprev (res->busno, NULL, 0);
610 610
611 if (!bus_cur) { 611 if (!bus_cur) {
612 /* didn't find a bus, smth's wrong!!! */ 612 /* didn't find a bus, something's wrong!!! */
613 debug ("no bus in the system, either pci_dev's wrong or allocation failed\n"); 613 debug ("no bus in the system, either pci_dev's wrong or allocation failed\n");
614 return -ENODEV; 614 return -ENODEV;
615 } 615 }
@@ -648,7 +648,7 @@ int ibmphp_add_resource (struct resource_node *res)
648 if (!range_cur) { 648 if (!range_cur) {
649 switch (res->type) { 649 switch (res->type) {
650 case IO: 650 case IO:
651 ++bus_cur->needIOUpdate; 651 ++bus_cur->needIOUpdate;
652 break; 652 break;
653 case MEM: 653 case MEM:
654 ++bus_cur->needMemUpdate; 654 ++bus_cur->needMemUpdate;
@@ -659,13 +659,13 @@ int ibmphp_add_resource (struct resource_node *res)
659 } 659 }
660 res->rangeno = -1; 660 res->rangeno = -1;
661 } 661 }
662 662
663 debug ("The range is %d\n", res->rangeno); 663 debug ("The range is %d\n", res->rangeno);
664 if (!res_start) { 664 if (!res_start) {
665 /* no first{IO,Mem,Pfmem} on the bus, 1st IO/Mem/Pfmem resource ever */ 665 /* no first{IO,Mem,Pfmem} on the bus, 1st IO/Mem/Pfmem resource ever */
666 switch (res->type) { 666 switch (res->type) {
667 case IO: 667 case IO:
668 bus_cur->firstIO = res; 668 bus_cur->firstIO = res;
669 break; 669 break;
670 case MEM: 670 case MEM:
671 bus_cur->firstMem = res; 671 bus_cur->firstMem = res;
@@ -673,7 +673,7 @@ int ibmphp_add_resource (struct resource_node *res)
673 case PFMEM: 673 case PFMEM:
674 bus_cur->firstPFMem = res; 674 bus_cur->firstPFMem = res;
675 break; 675 break;
676 } 676 }
677 res->next = NULL; 677 res->next = NULL;
678 res->nextRange = NULL; 678 res->nextRange = NULL;
679 } else { 679 } else {
@@ -770,7 +770,7 @@ int ibmphp_add_resource (struct resource_node *res)
770 * This routine will remove the resource from the list of resources 770 * This routine will remove the resource from the list of resources
771 * 771 *
772 * Input: io, mem, and/or pfmem resource to be deleted 772 * Input: io, mem, and/or pfmem resource to be deleted
773 * Ouput: modified resource list 773 * Output: modified resource list
774 * 0 or error code 774 * 0 or error code
775 ****************************************************************************/ 775 ****************************************************************************/
776int ibmphp_remove_resource (struct resource_node *res) 776int ibmphp_remove_resource (struct resource_node *res)
@@ -825,7 +825,7 @@ int ibmphp_remove_resource (struct resource_node *res)
825 825
826 if (!res_cur) { 826 if (!res_cur) {
827 if (res->type == PFMEM) { 827 if (res->type == PFMEM) {
828 /* 828 /*
829 * case where pfmem might be in the PFMemFromMem list 829 * case where pfmem might be in the PFMemFromMem list
830 * so will also need to remove the corresponding mem 830 * so will also need to remove the corresponding mem
831 * entry 831 * entry
@@ -961,12 +961,12 @@ static struct range_node * find_range (struct bus_node *bus_cur, struct resource
961} 961}
962 962
963/***************************************************************************** 963/*****************************************************************************
964 * This routine will check to make sure the io/mem/pfmem->len that the device asked for 964 * This routine will check to make sure the io/mem/pfmem->len that the device asked for
965 * can fit w/i our list of available IO/MEM/PFMEM resources. If cannot, returns -EINVAL, 965 * can fit w/i our list of available IO/MEM/PFMEM resources. If cannot, returns -EINVAL,
966 * otherwise, returns 0 966 * otherwise, returns 0
967 * 967 *
968 * Input: resource 968 * Input: resource
969 * Ouput: the correct start and end address are inputted into the resource node, 969 * Output: the correct start and end address are inputted into the resource node,
970 * 0 or -EINVAL 970 * 0 or -EINVAL
971 *****************************************************************************/ 971 *****************************************************************************/
972int ibmphp_check_resource (struct resource_node *res, u8 bridge) 972int ibmphp_check_resource (struct resource_node *res, u8 bridge)
@@ -996,7 +996,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
996 bus_cur = find_bus_wprev (res->busno, NULL, 0); 996 bus_cur = find_bus_wprev (res->busno, NULL, 0);
997 997
998 if (!bus_cur) { 998 if (!bus_cur) {
999 /* didn't find a bus, smth's wrong!!! */ 999 /* didn't find a bus, something's wrong!!! */
1000 debug ("no bus in the system, either pci_dev's wrong or allocation failed\n"); 1000 debug ("no bus in the system, either pci_dev's wrong or allocation failed\n");
1001 return -EINVAL; 1001 return -EINVAL;
1002 } 1002 }
@@ -1066,7 +1066,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
1066 break; 1066 break;
1067 } 1067 }
1068 } 1068 }
1069 1069
1070 if (flag && len_cur == res->len) { 1070 if (flag && len_cur == res->len) {
1071 debug ("but we are not here, right?\n"); 1071 debug ("but we are not here, right?\n");
1072 res->start = start_cur; 1072 res->start = start_cur;
@@ -1118,10 +1118,10 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
1118 if (res_prev) { 1118 if (res_prev) {
1119 if (res_prev->rangeno != res_cur->rangeno) { 1119 if (res_prev->rangeno != res_cur->rangeno) {
1120 /* 1st device on this range */ 1120 /* 1st device on this range */
1121 if ((res_cur->start != range->start) && 1121 if ((res_cur->start != range->start) &&
1122 ((len_tmp = res_cur->start - 1 - range->start) >= res->len)) { 1122 ((len_tmp = res_cur->start - 1 - range->start) >= res->len)) {
1123 if ((len_tmp < len_cur) || (len_cur == 0)) { 1123 if ((len_tmp < len_cur) || (len_cur == 0)) {
1124 if ((range->start % tmp_divide) == 0) { 1124 if ((range->start % tmp_divide) == 0) {
1125 /* just perfect, starting address is divisible by length */ 1125 /* just perfect, starting address is divisible by length */
1126 flag = 1; 1126 flag = 1;
1127 len_cur = len_tmp; 1127 len_cur = len_tmp;
@@ -1344,7 +1344,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
1344 * This routine is called from remove_card if the card contained PPB. 1344 * This routine is called from remove_card if the card contained PPB.
1345 * It will remove all the resources on the bus as well as the bus itself 1345 * It will remove all the resources on the bus as well as the bus itself
1346 * Input: Bus 1346 * Input: Bus
1347 * Ouput: 0, -ENODEV 1347 * Output: 0, -ENODEV
1348 ********************************************************************************/ 1348 ********************************************************************************/
1349int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno) 1349int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno)
1350{ 1350{
@@ -1353,7 +1353,7 @@ int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno)
1353 struct bus_node *prev_bus; 1353 struct bus_node *prev_bus;
1354 int rc; 1354 int rc;
1355 1355
1356 prev_bus = find_bus_wprev (parent_busno, NULL, 0); 1356 prev_bus = find_bus_wprev (parent_busno, NULL, 0);
1357 1357
1358 if (!prev_bus) { 1358 if (!prev_bus) {
1359 debug ("something terribly wrong. Cannot find parent bus to the one to remove\n"); 1359 debug ("something terribly wrong. Cannot find parent bus to the one to remove\n");
@@ -1424,7 +1424,7 @@ int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno)
1424} 1424}
1425 1425
1426/****************************************************************************** 1426/******************************************************************************
1427 * This routine deletes the ranges from a given bus, and the entries from the 1427 * This routine deletes the ranges from a given bus, and the entries from the
1428 * parent's bus in the resources 1428 * parent's bus in the resources
1429 * Input: current bus, previous bus 1429 * Input: current bus, previous bus
1430 * Output: 0, -EINVAL 1430 * Output: 0, -EINVAL
@@ -1453,7 +1453,7 @@ static int remove_ranges (struct bus_node *bus_cur, struct bus_node *bus_prev)
1453 if (bus_cur->noMemRanges) { 1453 if (bus_cur->noMemRanges) {
1454 range_cur = bus_cur->rangeMem; 1454 range_cur = bus_cur->rangeMem;
1455 for (i = 0; i < bus_cur->noMemRanges; i++) { 1455 for (i = 0; i < bus_cur->noMemRanges; i++) {
1456 if (ibmphp_find_resource (bus_prev, range_cur->start, &res, MEM) < 0) 1456 if (ibmphp_find_resource (bus_prev, range_cur->start, &res, MEM) < 0)
1457 return -EINVAL; 1457 return -EINVAL;
1458 1458
1459 ibmphp_remove_resource (res); 1459 ibmphp_remove_resource (res);
@@ -1467,7 +1467,7 @@ static int remove_ranges (struct bus_node *bus_cur, struct bus_node *bus_prev)
1467 if (bus_cur->noPFMemRanges) { 1467 if (bus_cur->noPFMemRanges) {
1468 range_cur = bus_cur->rangePFMem; 1468 range_cur = bus_cur->rangePFMem;
1469 for (i = 0; i < bus_cur->noPFMemRanges; i++) { 1469 for (i = 0; i < bus_cur->noPFMemRanges; i++) {
1470 if (ibmphp_find_resource (bus_prev, range_cur->start, &res, PFMEM) < 0) 1470 if (ibmphp_find_resource (bus_prev, range_cur->start, &res, PFMEM) < 0)
1471 return -EINVAL; 1471 return -EINVAL;
1472 1472
1473 ibmphp_remove_resource (res); 1473 ibmphp_remove_resource (res);
@@ -1482,7 +1482,7 @@ static int remove_ranges (struct bus_node *bus_cur, struct bus_node *bus_prev)
1482} 1482}
1483 1483
1484/* 1484/*
1485 * find the resource node in the bus 1485 * find the resource node in the bus
1486 * Input: Resource needed, start address of the resource, type of resource 1486 * Input: Resource needed, start address of the resource, type of resource
1487 */ 1487 */
1488int ibmphp_find_resource (struct bus_node *bus, u32 start_address, struct resource_node **res, int flag) 1488int ibmphp_find_resource (struct bus_node *bus, u32 start_address, struct resource_node **res, int flag)
@@ -1512,7 +1512,7 @@ int ibmphp_find_resource (struct bus_node *bus, u32 start_address, struct resour
1512 err ("wrong type of flag\n"); 1512 err ("wrong type of flag\n");
1513 return -EINVAL; 1513 return -EINVAL;
1514 } 1514 }
1515 1515
1516 while (res_cur) { 1516 while (res_cur) {
1517 if (res_cur->start == start_address) { 1517 if (res_cur->start == start_address) {
1518 *res = res_cur; 1518 *res = res_cur;
@@ -1718,7 +1718,7 @@ static int __init once_over (void)
1718 } /* end for pfmem */ 1718 } /* end for pfmem */
1719 } /* end if */ 1719 } /* end if */
1720 } /* end list_for_each bus */ 1720 } /* end list_for_each bus */
1721 return 0; 1721 return 0;
1722} 1722}
1723 1723
1724int ibmphp_add_pfmem_from_mem (struct resource_node *pfmem) 1724int ibmphp_add_pfmem_from_mem (struct resource_node *pfmem)
@@ -1760,9 +1760,9 @@ static struct bus_node *find_bus_wprev (u8 bus_number, struct bus_node **prev, u
1760 list_for_each (tmp, &gbuses) { 1760 list_for_each (tmp, &gbuses) {
1761 tmp_prev = tmp->prev; 1761 tmp_prev = tmp->prev;
1762 bus_cur = list_entry (tmp, struct bus_node, bus_list); 1762 bus_cur = list_entry (tmp, struct bus_node, bus_list);
1763 if (flag) 1763 if (flag)
1764 *prev = list_entry (tmp_prev, struct bus_node, bus_list); 1764 *prev = list_entry (tmp_prev, struct bus_node, bus_list);
1765 if (bus_cur->busno == bus_number) 1765 if (bus_cur->busno == bus_number)
1766 return bus_cur; 1766 return bus_cur;
1767 } 1767 }
1768 1768
@@ -1776,7 +1776,7 @@ void ibmphp_print_test (void)
1776 struct range_node *range; 1776 struct range_node *range;
1777 struct resource_node *res; 1777 struct resource_node *res;
1778 struct list_head *tmp; 1778 struct list_head *tmp;
1779 1779
1780 debug_pci ("*****************START**********************\n"); 1780 debug_pci ("*****************START**********************\n");
1781 1781
1782 if ((!list_empty(&gbuses)) && flags) { 1782 if ((!list_empty(&gbuses)) && flags) {
@@ -1906,7 +1906,7 @@ static int range_exists_already (struct range_node * range, struct bus_node * bu
1906 return 1; 1906 return 1;
1907 range_cur = range_cur->next; 1907 range_cur = range_cur->next;
1908 } 1908 }
1909 1909
1910 return 0; 1910 return 0;
1911} 1911}
1912 1912
@@ -1920,7 +1920,7 @@ static int range_exists_already (struct range_node * range, struct bus_node * bu
1920 * Returns: none 1920 * Returns: none
1921 * Note: this function doesn't take into account IO restrictions etc, 1921 * Note: this function doesn't take into account IO restrictions etc,
1922 * so will only work for bridges with no video/ISA devices behind them It 1922 * so will only work for bridges with no video/ISA devices behind them It
1923 * also will not work for onboard PPB's that can have more than 1 *bus 1923 * also will not work for onboard PPBs that can have more than 1 *bus
1924 * behind them All these are TO DO. 1924 * behind them All these are TO DO.
1925 * Also need to add more error checkings... (from fnc returns etc) 1925 * Also need to add more error checkings... (from fnc returns etc)
1926 */ 1926 */
@@ -1963,7 +1963,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
1963 case PCI_HEADER_TYPE_BRIDGE: 1963 case PCI_HEADER_TYPE_BRIDGE:
1964 function = 0x8; 1964 function = 0x8;
1965 case PCI_HEADER_TYPE_MULTIBRIDGE: 1965 case PCI_HEADER_TYPE_MULTIBRIDGE:
1966 /* We assume here that only 1 bus behind the bridge 1966 /* We assume here that only 1 bus behind the bridge
1967 TO DO: add functionality for several: 1967 TO DO: add functionality for several:
1968 temp = secondary; 1968 temp = secondary;
1969 while (temp < subordinate) { 1969 while (temp < subordinate) {
@@ -1972,7 +1972,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
1972 } 1972 }
1973 */ 1973 */
1974 pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_busno); 1974 pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_busno);
1975 bus_sec = find_bus_wprev (sec_busno, NULL, 0); 1975 bus_sec = find_bus_wprev (sec_busno, NULL, 0);
1976 /* this bus structure doesn't exist yet, PPB was configured during previous loading of ibmphp */ 1976 /* this bus structure doesn't exist yet, PPB was configured during previous loading of ibmphp */
1977 if (!bus_sec) { 1977 if (!bus_sec) {
1978 bus_sec = alloc_error_bus (NULL, sec_busno, 1); 1978 bus_sec = alloc_error_bus (NULL, sec_busno, 1);
@@ -2028,7 +2028,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
2028 io->len = io->end - io->start + 1; 2028 io->len = io->end - io->start + 1;
2029 ibmphp_add_resource (io); 2029 ibmphp_add_resource (io);
2030 } 2030 }
2031 } 2031 }
2032 2032
2033 pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, &start_mem_address); 2033 pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, &start_mem_address);
2034 pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, &end_mem_address); 2034 pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, &end_mem_address);
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index ec20f74c8981..cfa92a984e62 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -131,7 +131,7 @@ static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
131 } 131 }
132 module_put(slot->ops->owner); 132 module_put(slot->ops->owner);
133 133
134exit: 134exit:
135 if (retval) 135 if (retval)
136 return retval; 136 return retval;
137 return count; 137 return count;
@@ -177,7 +177,7 @@ static ssize_t attention_write_file(struct pci_slot *slot, const char *buf,
177 retval = ops->set_attention_status(slot->hotplug, attention); 177 retval = ops->set_attention_status(slot->hotplug, attention);
178 module_put(ops->owner); 178 module_put(ops->owner);
179 179
180exit: 180exit:
181 if (retval) 181 if (retval)
182 return retval; 182 return retval;
183 return count; 183 return count;
@@ -247,7 +247,7 @@ static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
247 retval = slot->ops->hardware_test(slot, test); 247 retval = slot->ops->hardware_test(slot, test);
248 module_put(slot->ops->owner); 248 module_put(slot->ops->owner);
249 249
250exit: 250exit:
251 if (retval) 251 if (retval)
252 return retval; 252 return retval;
253 return count; 253 return count;
@@ -512,7 +512,7 @@ int pci_hp_deregister(struct hotplug_slot *hotplug)
512 * @hotplug: pointer to the slot whose info has changed 512 * @hotplug: pointer to the slot whose info has changed
513 * @info: pointer to the info copy into the slot's info structure 513 * @info: pointer to the info copy into the slot's info structure
514 * 514 *
515 * @slot must have been registered with the pci 515 * @slot must have been registered with the pci
516 * hotplug subsystem previously with a call to pci_hp_register(). 516 * hotplug subsystem previously with a call to pci_hp_register().
517 * 517 *
518 * Returns 0 if successful, anything else for an error. 518 * Returns 0 if successful, anything else for an error.
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 541bbe6d5343..21e865ded1dc 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -180,5 +180,5 @@ static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
180{ 180{
181 return 0; 181 return 0;
182} 182}
183#endif /* CONFIG_ACPI */ 183#endif /* CONFIG_ACPI */
184#endif /* _PCIEHP_H */ 184#endif /* _PCIEHP_H */
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index ead7c534095e..eddddd447d0d 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -54,7 +54,7 @@ int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
54{ 54{
55 if (slot_detection_mode != PCIEHP_DETECT_ACPI) 55 if (slot_detection_mode != PCIEHP_DETECT_ACPI)
56 return 0; 56 return 0;
57 if (acpi_pci_detect_ejectable(DEVICE_ACPI_HANDLE(&dev->dev))) 57 if (acpi_pci_detect_ejectable(ACPI_HANDLE(&dev->dev)))
58 return 0; 58 return 0;
59 return -ENODEV; 59 return -ENODEV;
60} 60}
@@ -78,7 +78,7 @@ static int __initdata dup_slot_id;
78static int __initdata acpi_slot_detected; 78static int __initdata acpi_slot_detected;
79static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots); 79static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots);
80 80
81/* Dummy driver for dumplicate name detection */ 81/* Dummy driver for duplicate name detection */
82static int __init dummy_probe(struct pcie_device *dev) 82static int __init dummy_probe(struct pcie_device *dev)
83{ 83{
84 u32 slot_cap; 84 u32 slot_cap;
@@ -96,7 +96,7 @@ static int __init dummy_probe(struct pcie_device *dev)
96 dup_slot_id++; 96 dup_slot_id++;
97 } 97 }
98 list_add_tail(&slot->list, &dummy_slots); 98 list_add_tail(&slot->list, &dummy_slots);
99 handle = DEVICE_ACPI_HANDLE(&pdev->dev); 99 handle = ACPI_HANDLE(&pdev->dev);
100 if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle)) 100 if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle))
101 acpi_slot_detected = 1; 101 acpi_slot_detected = 1;
102 return -ENODEV; /* dummy driver always returns error */ 102 return -ENODEV; /* dummy driver always returns error */
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index f4a18f51a29c..bbd48bbe4e9b 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -351,8 +351,8 @@ static int __init pcied_init(void)
351 351
352 pciehp_firmware_init(); 352 pciehp_firmware_init();
353 retval = pcie_port_service_register(&hpdriver_portdrv); 353 retval = pcie_port_service_register(&hpdriver_portdrv);
354 dbg("pcie_port_service_register = %d\n", retval); 354 dbg("pcie_port_service_register = %d\n", retval);
355 info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); 355 info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
356 if (retval) 356 if (retval)
357 dbg("Failure to register service\n"); 357 dbg("Failure to register service\n");
358 358
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 51f56ef4ab6f..3eea3fdd4b0b 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -92,7 +92,7 @@ static void start_int_poll_timer(struct controller *ctrl, int sec)
92{ 92{
93 /* Clamp to sane value */ 93 /* Clamp to sane value */
94 if ((sec <= 0) || (sec > 60)) 94 if ((sec <= 0) || (sec > 60))
95 sec = 2; 95 sec = 2;
96 96
97 ctrl->poll_timer.function = &int_poll_timeout; 97 ctrl->poll_timer.function = &int_poll_timeout;
98 ctrl->poll_timer.data = (unsigned long)ctrl; 98 ctrl->poll_timer.data = (unsigned long)ctrl;
@@ -194,7 +194,7 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
194 ctrl_dbg(ctrl, "CMD_COMPLETED not clear after 1 sec\n"); 194 ctrl_dbg(ctrl, "CMD_COMPLETED not clear after 1 sec\n");
195 } else if (!NO_CMD_CMPL(ctrl)) { 195 } else if (!NO_CMD_CMPL(ctrl)) {
196 /* 196 /*
197 * This controller semms to notify of command completed 197 * This controller seems to notify of command completed
198 * event even though it supports none of power 198 * event even though it supports none of power
199 * controller, attention led, power led and EMI. 199 * controller, attention led, power led and EMI.
200 */ 200 */
@@ -926,7 +926,7 @@ struct controller *pcie_init(struct pcie_device *dev)
926 if (pciehp_writew(ctrl, PCI_EXP_SLTSTA, 0x1f)) 926 if (pciehp_writew(ctrl, PCI_EXP_SLTSTA, 0x1f))
927 goto abort_ctrl; 927 goto abort_ctrl;
928 928
929 /* Disable sotfware notification */ 929 /* Disable software notification */
930 pcie_disable_notification(ctrl); 930 pcie_disable_notification(ctrl);
931 931
932 ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", 932 ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c
index 1f00b937f721..ac69094e4b20 100644
--- a/drivers/pci/hotplug/pcihp_skeleton.c
+++ b/drivers/pci/hotplug/pcihp_skeleton.c
@@ -52,7 +52,7 @@ static LIST_HEAD(slot_list);
52 do { \ 52 do { \
53 if (debug) \ 53 if (debug) \
54 printk (KERN_DEBUG "%s: " format "\n", \ 54 printk (KERN_DEBUG "%s: " format "\n", \
55 MY_NAME , ## arg); \ 55 MY_NAME , ## arg); \
56 } while (0) 56 } while (0)
57#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) 57#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
58#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) 58#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
@@ -287,7 +287,7 @@ static int __init init_slots(void)
287 hotplug_slot->release = &release_slot; 287 hotplug_slot->release = &release_slot;
288 make_slot_name(slot); 288 make_slot_name(slot);
289 hotplug_slot->ops = &skel_hotplug_slot_ops; 289 hotplug_slot->ops = &skel_hotplug_slot_ops;
290 290
291 /* 291 /*
292 * Initialize the slot info structure with some known 292 * Initialize the slot info structure with some known
293 * good values. 293 * good values.
@@ -296,7 +296,7 @@ static int __init init_slots(void)
296 get_attention_status(hotplug_slot, &info->attention_status); 296 get_attention_status(hotplug_slot, &info->attention_status);
297 get_latch_status(hotplug_slot, &info->latch_status); 297 get_latch_status(hotplug_slot, &info->latch_status);
298 get_adapter_status(hotplug_slot, &info->adapter_status); 298 get_adapter_status(hotplug_slot, &info->adapter_status);
299 299
300 dbg("registering slot %d\n", i); 300 dbg("registering slot %d\n", i);
301 retval = pci_hp_register(slot->hotplug_slot); 301 retval = pci_hp_register(slot->hotplug_slot);
302 if (retval) { 302 if (retval) {
@@ -336,7 +336,7 @@ static void __exit cleanup_slots(void)
336 pci_hp_deregister(slot->hotplug_slot); 336 pci_hp_deregister(slot->hotplug_slot);
337 } 337 }
338} 338}
339 339
340static int __init pcihp_skel_init(void) 340static int __init pcihp_skel_init(void)
341{ 341{
342 int retval; 342 int retval;
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index bb7af78e4eed..e9c044d15add 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -217,7 +217,7 @@ static int dlpar_remove_phb(char *drc_name, struct device_node *dn)
217 if (!pcibios_find_pci_bus(dn)) 217 if (!pcibios_find_pci_bus(dn))
218 return -EINVAL; 218 return -EINVAL;
219 219
220 /* If pci slot is hotplugable, use hotplug to remove it */ 220 /* If pci slot is hotpluggable, use hotplug to remove it */
221 slot = find_php_slot(dn); 221 slot = find_php_slot(dn);
222 if (slot && rpaphp_deregister_slot(slot)) { 222 if (slot && rpaphp_deregister_slot(slot)) {
223 printk(KERN_ERR "%s: unable to remove hotplug slot %s\n", 223 printk(KERN_ERR "%s: unable to remove hotplug slot %s\n",
diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h
index 3135856e5e1c..b2593e876a09 100644
--- a/drivers/pci/hotplug/rpaphp.h
+++ b/drivers/pci/hotplug/rpaphp.h
@@ -49,9 +49,9 @@
49extern bool rpaphp_debug; 49extern bool rpaphp_debug;
50#define dbg(format, arg...) \ 50#define dbg(format, arg...) \
51 do { \ 51 do { \
52 if (rpaphp_debug) \ 52 if (rpaphp_debug) \
53 printk(KERN_DEBUG "%s: " format, \ 53 printk(KERN_DEBUG "%s: " format, \
54 MY_NAME , ## arg); \ 54 MY_NAME , ## arg); \
55 } while (0) 55 } while (0)
56#define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg) 56#define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
57#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg) 57#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
@@ -99,5 +99,5 @@ void dealloc_slot_struct(struct slot *slot);
99struct slot *alloc_slot_struct(struct device_node *dn, int drc_index, char *drc_name, int power_domain); 99struct slot *alloc_slot_struct(struct device_node *dn, int drc_index, char *drc_name, int power_domain);
100int rpaphp_register_slot(struct slot *slot); 100int rpaphp_register_slot(struct slot *slot);
101int rpaphp_deregister_slot(struct slot *slot); 101int rpaphp_deregister_slot(struct slot *slot);
102 102
103#endif /* _PPC64PHP_H */ 103#endif /* _PPC64PHP_H */
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 127d6e600185..b7fc5c9255a5 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -226,7 +226,7 @@ int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
226 for (i = 0; i < indexes[0]; i++) { 226 for (i = 0; i < indexes[0]; i++) {
227 if ((unsigned int) indexes[i + 1] == *my_index) { 227 if ((unsigned int) indexes[i + 1] == *my_index) {
228 if (drc_name) 228 if (drc_name)
229 *drc_name = name_tmp; 229 *drc_name = name_tmp;
230 if (drc_type) 230 if (drc_type)
231 *drc_type = type_tmp; 231 *drc_type = type_tmp;
232 if (drc_index) 232 if (drc_index)
@@ -289,7 +289,7 @@ static int is_php_dn(struct device_node *dn, const int **indexes,
289 * rpaphp_add_slot -- declare a hotplug slot to the hotplug subsystem. 289 * rpaphp_add_slot -- declare a hotplug slot to the hotplug subsystem.
290 * @dn: device node of slot 290 * @dn: device node of slot
291 * 291 *
292 * This subroutine will register a hotplugable slot with the 292 * This subroutine will register a hotpluggable slot with the
293 * PCI hotplug infrastructure. This routine is typically called 293 * PCI hotplug infrastructure. This routine is typically called
294 * during boot time, if the hotplug slots are present at boot time, 294 * during boot time, if the hotplug slots are present at boot time,
295 * or is called later, by the dlpar add code, if the slot is 295 * or is called later, by the dlpar add code, if the slot is
@@ -328,7 +328,7 @@ int rpaphp_add_slot(struct device_node *dn)
328 return -ENOMEM; 328 return -ENOMEM;
329 329
330 slot->type = simple_strtoul(type, NULL, 10); 330 slot->type = simple_strtoul(type, NULL, 10);
331 331
332 dbg("Found drc-index:0x%x drc-name:%s drc-type:%s\n", 332 dbg("Found drc-index:0x%x drc-name:%s drc-type:%s\n",
333 indexes[i + 1], name, type); 333 indexes[i + 1], name, type);
334 334
@@ -356,7 +356,7 @@ static void __exit cleanup_slots(void)
356 /* 356 /*
357 * Unregister all of our slots with the pci_hotplug subsystem, 357 * Unregister all of our slots with the pci_hotplug subsystem,
358 * and free up all memory that we had allocated. 358 * and free up all memory that we had allocated.
359 * memory will be freed in release_slot callback. 359 * memory will be freed in release_slot callback.
360 */ 360 */
361 361
362 list_for_each_safe(tmp, n, &rpaphp_slot_head) { 362 list_for_each_safe(tmp, n, &rpaphp_slot_head) {
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
index 513e1e282391..9243f3e7a1c9 100644
--- a/drivers/pci/hotplug/rpaphp_pci.c
+++ b/drivers/pci/hotplug/rpaphp_pci.c
@@ -44,7 +44,7 @@ int rpaphp_get_sensor_state(struct slot *slot, int *state)
44 dbg("%s: slot must be power up to get sensor-state\n", 44 dbg("%s: slot must be power up to get sensor-state\n",
45 __func__); 45 __func__);
46 46
47 /* some slots have to be powered up 47 /* some slots have to be powered up
48 * before get-sensor will succeed. 48 * before get-sensor will succeed.
49 */ 49 */
50 rc = rtas_set_power_level(slot->power_domain, POWER_ON, 50 rc = rtas_set_power_level(slot->power_domain, POWER_ON,
@@ -133,4 +133,3 @@ int rpaphp_enable_slot(struct slot *slot)
133 133
134 return 0; 134 return 0;
135} 135}
136
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index b283bbea6d24..a6082cc263f7 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * RPA Virtual I/O device functions 2 * RPA Virtual I/O device functions
3 * Copyright (C) 2004 Linda Xie <lxie@us.ibm.com> 3 * Copyright (C) 2004 Linda Xie <lxie@us.ibm.com>
4 * 4 *
5 * All rights reserved. 5 * All rights reserved.
@@ -51,27 +51,27 @@ struct slot *alloc_slot_struct(struct device_node *dn,
51 int drc_index, char *drc_name, int power_domain) 51 int drc_index, char *drc_name, int power_domain)
52{ 52{
53 struct slot *slot; 53 struct slot *slot;
54 54
55 slot = kzalloc(sizeof(struct slot), GFP_KERNEL); 55 slot = kzalloc(sizeof(struct slot), GFP_KERNEL);
56 if (!slot) 56 if (!slot)
57 goto error_nomem; 57 goto error_nomem;
58 slot->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL); 58 slot->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
59 if (!slot->hotplug_slot) 59 if (!slot->hotplug_slot)
60 goto error_slot; 60 goto error_slot;
61 slot->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info), 61 slot->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
62 GFP_KERNEL); 62 GFP_KERNEL);
63 if (!slot->hotplug_slot->info) 63 if (!slot->hotplug_slot->info)
64 goto error_hpslot; 64 goto error_hpslot;
65 slot->name = kstrdup(drc_name, GFP_KERNEL); 65 slot->name = kstrdup(drc_name, GFP_KERNEL);
66 if (!slot->name) 66 if (!slot->name)
67 goto error_info; 67 goto error_info;
68 slot->dn = dn; 68 slot->dn = dn;
69 slot->index = drc_index; 69 slot->index = drc_index;
70 slot->power_domain = power_domain; 70 slot->power_domain = power_domain;
71 slot->hotplug_slot->private = slot; 71 slot->hotplug_slot->private = slot;
72 slot->hotplug_slot->ops = &rpaphp_hotplug_slot_ops; 72 slot->hotplug_slot->ops = &rpaphp_hotplug_slot_ops;
73 slot->hotplug_slot->release = &rpaphp_release_slot; 73 slot->hotplug_slot->release = &rpaphp_release_slot;
74 74
75 return (slot); 75 return (slot);
76 76
77error_info: 77error_info:
@@ -91,7 +91,7 @@ static int is_registered(struct slot *slot)
91 list_for_each_entry(tmp_slot, &rpaphp_slot_head, rpaphp_slot_list) { 91 list_for_each_entry(tmp_slot, &rpaphp_slot_head, rpaphp_slot_list) {
92 if (!strcmp(tmp_slot->name, slot->name)) 92 if (!strcmp(tmp_slot->name, slot->name))
93 return 1; 93 return 1;
94 } 94 }
95 return 0; 95 return 0;
96} 96}
97 97
@@ -104,7 +104,7 @@ int rpaphp_deregister_slot(struct slot *slot)
104 __func__, slot->name); 104 __func__, slot->name);
105 105
106 list_del(&slot->rpaphp_slot_list); 106 list_del(&slot->rpaphp_slot_list);
107 107
108 retval = pci_hp_deregister(php_slot); 108 retval = pci_hp_deregister(php_slot);
109 if (retval) 109 if (retval)
110 err("Problem unregistering a slot %s\n", slot->name); 110 err("Problem unregistering a slot %s\n", slot->name);
@@ -120,7 +120,7 @@ int rpaphp_register_slot(struct slot *slot)
120 int retval; 120 int retval;
121 int slotno; 121 int slotno;
122 122
123 dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n", 123 dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n",
124 __func__, slot->dn->full_name, slot->index, slot->name, 124 __func__, slot->dn->full_name, slot->index, slot->name,
125 slot->power_domain, slot->type); 125 slot->power_domain, slot->type);
126 126
@@ -128,7 +128,7 @@ int rpaphp_register_slot(struct slot *slot)
128 if (is_registered(slot)) { 128 if (is_registered(slot)) {
129 err("rpaphp_register_slot: slot[%s] is already registered\n", slot->name); 129 err("rpaphp_register_slot: slot[%s] is already registered\n", slot->name);
130 return -EAGAIN; 130 return -EAGAIN;
131 } 131 }
132 132
133 if (slot->dn->child) 133 if (slot->dn->child)
134 slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn); 134 slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn);
@@ -145,4 +145,3 @@ int rpaphp_register_slot(struct slot *slot)
145 info("Slot [%s] registered\n", slot->name); 145 info("Slot [%s] registered\n", slot->name);
146 return 0; 146 return 0;
147} 147}
148
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index b2781dfe60e9..5b05a68cca6c 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -9,6 +9,7 @@
9 * Work to add BIOS PROM support was completed by Mike Habeck. 9 * Work to add BIOS PROM support was completed by Mike Habeck.
10 */ 10 */
11 11
12#include <linux/acpi.h>
12#include <linux/init.h> 13#include <linux/init.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/module.h> 15#include <linux/module.h>
@@ -29,7 +30,6 @@
29#include <asm/sn/sn_feature_sets.h> 30#include <asm/sn/sn_feature_sets.h>
30#include <asm/sn/sn_sal.h> 31#include <asm/sn/sn_sal.h>
31#include <asm/sn/types.h> 32#include <asm/sn/types.h>
32#include <linux/acpi.h>
33#include <asm/sn/acpi.h> 33#include <asm/sn/acpi.h>
34 34
35#include "../pci.h" 35#include "../pci.h"
@@ -414,7 +414,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
414 acpi_handle rethandle; 414 acpi_handle rethandle;
415 acpi_status ret; 415 acpi_status ret;
416 416
417 phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle; 417 phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion);
418 418
419 if (acpi_bus_get_device(phandle, &pdevice)) { 419 if (acpi_bus_get_device(phandle, &pdevice)) {
420 dev_dbg(&slot->pci_bus->self->dev, 420 dev_dbg(&slot->pci_bus->self->dev,
@@ -495,7 +495,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
495 495
496 /* free the ACPI resources for the slot */ 496 /* free the ACPI resources for the slot */
497 if (SN_ACPI_BASE_SUPPORT() && 497 if (SN_ACPI_BASE_SUPPORT() &&
498 PCI_CONTROLLER(slot->pci_bus)->acpi_handle) { 498 PCI_CONTROLLER(slot->pci_bus)->companion) {
499 unsigned long long adr; 499 unsigned long long adr;
500 struct acpi_device *device; 500 struct acpi_device *device;
501 acpi_handle phandle; 501 acpi_handle phandle;
@@ -504,7 +504,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
504 acpi_status ret; 504 acpi_status ret;
505 505
506 /* Get the rootbus node pointer */ 506 /* Get the rootbus node pointer */
507 phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle; 507 phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion);
508 508
509 acpi_scan_lock_acquire(); 509 acpi_scan_lock_acquire();
510 /* 510 /*
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index d876e4b3c6a9..61529097464d 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -216,13 +216,13 @@ struct ctrl_reg {
216 216
217/* offsets to the controller registers based on the above structure layout */ 217/* offsets to the controller registers based on the above structure layout */
218enum ctrl_offsets { 218enum ctrl_offsets {
219 BASE_OFFSET = offsetof(struct ctrl_reg, base_offset), 219 BASE_OFFSET = offsetof(struct ctrl_reg, base_offset),
220 SLOT_AVAIL1 = offsetof(struct ctrl_reg, slot_avail1), 220 SLOT_AVAIL1 = offsetof(struct ctrl_reg, slot_avail1),
221 SLOT_AVAIL2 = offsetof(struct ctrl_reg, slot_avail2), 221 SLOT_AVAIL2 = offsetof(struct ctrl_reg, slot_avail2),
222 SLOT_CONFIG = offsetof(struct ctrl_reg, slot_config), 222 SLOT_CONFIG = offsetof(struct ctrl_reg, slot_config),
223 SEC_BUS_CONFIG = offsetof(struct ctrl_reg, sec_bus_config), 223 SEC_BUS_CONFIG = offsetof(struct ctrl_reg, sec_bus_config),
224 MSI_CTRL = offsetof(struct ctrl_reg, msi_ctrl), 224 MSI_CTRL = offsetof(struct ctrl_reg, msi_ctrl),
225 PROG_INTERFACE = offsetof(struct ctrl_reg, prog_interface), 225 PROG_INTERFACE = offsetof(struct ctrl_reg, prog_interface),
226 CMD = offsetof(struct ctrl_reg, cmd), 226 CMD = offsetof(struct ctrl_reg, cmd),
227 CMD_STATUS = offsetof(struct ctrl_reg, cmd_status), 227 CMD_STATUS = offsetof(struct ctrl_reg, cmd_status),
228 INTR_LOC = offsetof(struct ctrl_reg, intr_loc), 228 INTR_LOC = offsetof(struct ctrl_reg, intr_loc),
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index d3f757df691c..faf13abd5b99 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -143,11 +143,11 @@ static int init_slots(struct controller *ctrl)
143 snprintf(name, SLOT_NAME_SIZE, "%d", slot->number); 143 snprintf(name, SLOT_NAME_SIZE, "%d", slot->number);
144 hotplug_slot->ops = &shpchp_hotplug_slot_ops; 144 hotplug_slot->ops = &shpchp_hotplug_slot_ops;
145 145
146 ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x " 146 ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x "
147 "hp_slot=%x sun=%x slot_device_offset=%x\n", 147 "hp_slot=%x sun=%x slot_device_offset=%x\n",
148 pci_domain_nr(ctrl->pci_dev->subordinate), 148 pci_domain_nr(ctrl->pci_dev->subordinate),
149 slot->bus, slot->device, slot->hp_slot, slot->number, 149 slot->bus, slot->device, slot->hp_slot, slot->number,
150 ctrl->slot_device_offset); 150 ctrl->slot_device_offset);
151 retval = pci_hp_register(slot->hotplug_slot, 151 retval = pci_hp_register(slot->hotplug_slot,
152 ctrl->pci_dev->subordinate, slot->device, name); 152 ctrl->pci_dev->subordinate, slot->device, name);
153 if (retval) { 153 if (retval) {
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 75ba2311b54f..2d7f474ca0ec 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -116,7 +116,7 @@
116#define SLOT_REG_RSVDZ_MASK ((1 << 15) | (7 << 21)) 116#define SLOT_REG_RSVDZ_MASK ((1 << 15) | (7 << 21))
117 117
118/* 118/*
119 * SHPC Command Code definitnions 119 * SHPC Command Code definitions
120 * 120 *
121 * Slot Operation 00h - 3Fh 121 * Slot Operation 00h - 3Fh
122 * Set Bus Segment Speed/Mode A 40h - 47h 122 * Set Bus Segment Speed/Mode A 40h - 47h
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c
index 1b90579b233a..50ce68098298 100644
--- a/drivers/pci/ioapic.c
+++ b/drivers/pci/ioapic.c
@@ -37,7 +37,7 @@ static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent)
37 char *type; 37 char *type;
38 struct resource *res; 38 struct resource *res;
39 39
40 handle = DEVICE_ACPI_HANDLE(&dev->dev); 40 handle = ACPI_HANDLE(&dev->dev);
41 if (!handle) 41 if (!handle)
42 return -EINVAL; 42 return -EINVAL;
43 43
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 21a7182dccd4..1fe2d6fb19d5 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -610,7 +610,7 @@ resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
610 struct resource tmp; 610 struct resource tmp;
611 enum pci_bar_type type; 611 enum pci_bar_type type;
612 int reg = pci_iov_resource_bar(dev, resno, &type); 612 int reg = pci_iov_resource_bar(dev, resno, &type);
613 613
614 if (!reg) 614 if (!reg)
615 return 0; 615 return 0;
616 616
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
index b008cf86b9c3..6684f153ab57 100644
--- a/drivers/pci/irq.c
+++ b/drivers/pci/irq.c
@@ -25,7 +25,7 @@ static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason)
25/** 25/**
26 * pci_lost_interrupt - reports a lost PCI interrupt 26 * pci_lost_interrupt - reports a lost PCI interrupt
27 * @pdev: device whose interrupt is lost 27 * @pdev: device whose interrupt is lost
28 * 28 *
29 * The primary function of this routine is to report a lost interrupt 29 * The primary function of this routine is to report a lost interrupt
30 * in a standard way which users can recognise (instead of blaming the 30 * in a standard way which users can recognise (instead of blaming the
31 * driver). 31 * driver).
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 5e63645a7abe..3fcd67a16677 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -784,7 +784,7 @@ error:
784 * @nvec: how many MSIs have been requested ? 784 * @nvec: how many MSIs have been requested ?
785 * @type: are we checking for MSI or MSI-X ? 785 * @type: are we checking for MSI or MSI-X ?
786 * 786 *
787 * Look at global flags, the device itself, and its parent busses 787 * Look at global flags, the device itself, and its parent buses
788 * to determine if MSI/-X are supported for the device. If MSI/-X is 788 * to determine if MSI/-X are supported for the device. If MSI/-X is
789 * supported return 0, else return an error code. 789 * supported return 0, else return an error code.
790 **/ 790 **/
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index dfd1f59de729..577074efbe62 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -141,7 +141,7 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
141 * if (_PRW at S-state x) 141 * if (_PRW at S-state x)
142 * choose from highest power _SxD to lowest power _SxW 142 * choose from highest power _SxD to lowest power _SxW
143 * else // no _PRW at S-state x 143 * else // no _PRW at S-state x
144 * choose highest power _SxD or any lower power 144 * choose highest power _SxD or any lower power
145 */ 145 */
146 146
147static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev) 147static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
@@ -173,14 +173,14 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
173 173
174static bool acpi_pci_power_manageable(struct pci_dev *dev) 174static bool acpi_pci_power_manageable(struct pci_dev *dev)
175{ 175{
176 acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); 176 acpi_handle handle = ACPI_HANDLE(&dev->dev);
177 177
178 return handle ? acpi_bus_power_manageable(handle) : false; 178 return handle ? acpi_bus_power_manageable(handle) : false;
179} 179}
180 180
181static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) 181static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
182{ 182{
183 acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); 183 acpi_handle handle = ACPI_HANDLE(&dev->dev);
184 static const u8 state_conv[] = { 184 static const u8 state_conv[] = {
185 [PCI_D0] = ACPI_STATE_D0, 185 [PCI_D0] = ACPI_STATE_D0,
186 [PCI_D1] = ACPI_STATE_D1, 186 [PCI_D1] = ACPI_STATE_D1,
@@ -217,7 +217,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
217 217
218static bool acpi_pci_can_wakeup(struct pci_dev *dev) 218static bool acpi_pci_can_wakeup(struct pci_dev *dev)
219{ 219{
220 acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); 220 acpi_handle handle = ACPI_HANDLE(&dev->dev);
221 221
222 return handle ? acpi_bus_can_wakeup(handle) : false; 222 return handle ? acpi_bus_can_wakeup(handle) : false;
223} 223}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 454853507b7e..25f0bc659164 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -19,6 +19,7 @@
19#include <linux/cpu.h> 19#include <linux/cpu.h>
20#include <linux/pm_runtime.h> 20#include <linux/pm_runtime.h>
21#include <linux/suspend.h> 21#include <linux/suspend.h>
22#include <linux/kexec.h>
22#include "pci.h" 23#include "pci.h"
23 24
24struct pci_dynid { 25struct pci_dynid {
@@ -288,12 +289,27 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
288 int error, node; 289 int error, node;
289 struct drv_dev_and_id ddi = { drv, dev, id }; 290 struct drv_dev_and_id ddi = { drv, dev, id };
290 291
291 /* Execute driver initialization on node where the device's 292 /*
292 bus is attached to. This way the driver likely allocates 293 * Execute driver initialization on node where the device is
293 its local memory on the right node without any need to 294 * attached. This way the driver likely allocates its local memory
294 change it. */ 295 * on the right node.
296 */
295 node = dev_to_node(&dev->dev); 297 node = dev_to_node(&dev->dev);
296 if (node >= 0) { 298
299 /*
300 * On NUMA systems, we are likely to call a PF probe function using
301 * work_on_cpu(). If that probe calls pci_enable_sriov() (which
302 * adds the VF devices via pci_bus_add_device()), we may re-enter
303 * this function to call the VF probe function. Calling
304 * work_on_cpu() again will cause a lockdep warning. Since VFs are
305 * always on the same node as the PF, we can work around this by
306 * avoiding work_on_cpu() when we're already on the correct node.
307 *
308 * Preemption is enabled, so it's theoretically unsafe to use
309 * numa_node_id(), but even if we run the probe function on the
310 * wrong node, it should be functionally correct.
311 */
312 if (node >= 0 && node != numa_node_id()) {
297 int cpu; 313 int cpu;
298 314
299 get_online_cpus(); 315 get_online_cpus();
@@ -305,6 +321,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
305 put_online_cpus(); 321 put_online_cpus();
306 } else 322 } else
307 error = local_pci_probe(&ddi); 323 error = local_pci_probe(&ddi);
324
308 return error; 325 return error;
309} 326}
310 327
@@ -312,7 +329,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
312 * __pci_device_probe - check if a driver wants to claim a specific PCI device 329 * __pci_device_probe - check if a driver wants to claim a specific PCI device
313 * @drv: driver to call to check if it wants the PCI device 330 * @drv: driver to call to check if it wants the PCI device
314 * @pci_dev: PCI device being probed 331 * @pci_dev: PCI device being probed
315 * 332 *
316 * returns 0 on success, else error. 333 * returns 0 on success, else error.
317 * side-effect: pci_dev->driver is set to drv when drv claims pci_dev. 334 * side-effect: pci_dev->driver is set to drv when drv claims pci_dev.
318 */ 335 */
@@ -378,7 +395,7 @@ static int pci_device_remove(struct device * dev)
378 * We would love to complain here if pci_dev->is_enabled is set, that 395 * We would love to complain here if pci_dev->is_enabled is set, that
379 * the driver should have called pci_disable_device(), but the 396 * the driver should have called pci_disable_device(), but the
380 * unfortunate fact is there are too many odd BIOS and bridge setups 397 * unfortunate fact is there are too many odd BIOS and bridge setups
381 * that don't like drivers doing that all of the time. 398 * that don't like drivers doing that all of the time.
382 * Oh well, we can dream of sane hardware when we sleep, no matter how 399 * Oh well, we can dream of sane hardware when we sleep, no matter how
383 * horrible the crap we have to deal with is when we are awake... 400 * horrible the crap we have to deal with is when we are awake...
384 */ 401 */
@@ -399,12 +416,17 @@ static void pci_device_shutdown(struct device *dev)
399 pci_msi_shutdown(pci_dev); 416 pci_msi_shutdown(pci_dev);
400 pci_msix_shutdown(pci_dev); 417 pci_msix_shutdown(pci_dev);
401 418
419#ifdef CONFIG_KEXEC
402 /* 420 /*
403 * Turn off Bus Master bit on the device to tell it to not 421 * If this is a kexec reboot, turn off Bus Master bit on the
404 * continue to do DMA. Don't touch devices in D3cold or unknown states. 422 * device to tell it to not continue to do DMA. Don't touch
423 * devices in D3cold or unknown states.
424 * If it is not a kexec reboot, firmware will hit the PCI
425 * devices with big hammer and stop their DMA any way.
405 */ 426 */
406 if (pci_dev->current_state <= PCI_D3hot) 427 if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
407 pci_clear_master(pci_dev); 428 pci_clear_master(pci_dev);
429#endif
408} 430}
409 431
410#ifdef CONFIG_PM 432#ifdef CONFIG_PM
@@ -1156,10 +1178,10 @@ static const struct dev_pm_ops pci_dev_pm_ops = {
1156 * @drv: the driver structure to register 1178 * @drv: the driver structure to register
1157 * @owner: owner module of drv 1179 * @owner: owner module of drv
1158 * @mod_name: module name string 1180 * @mod_name: module name string
1159 * 1181 *
1160 * Adds the driver structure to the list of registered drivers. 1182 * Adds the driver structure to the list of registered drivers.
1161 * Returns a negative value on error, otherwise 0. 1183 * Returns a negative value on error, otherwise 0.
1162 * If no error occurred, the driver remains registered even if 1184 * If no error occurred, the driver remains registered even if
1163 * no device was claimed during registration. 1185 * no device was claimed during registration.
1164 */ 1186 */
1165int __pci_register_driver(struct pci_driver *drv, struct module *owner, 1187int __pci_register_driver(struct pci_driver *drv, struct module *owner,
@@ -1181,7 +1203,7 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
1181/** 1203/**
1182 * pci_unregister_driver - unregister a pci driver 1204 * pci_unregister_driver - unregister a pci driver
1183 * @drv: the driver structure to unregister 1205 * @drv: the driver structure to unregister
1184 * 1206 *
1185 * Deletes the driver structure from the list of registered PCI drivers, 1207 * Deletes the driver structure from the list of registered PCI drivers,
1186 * gives it a chance to clean up by calling its remove() function for 1208 * gives it a chance to clean up by calling its remove() function for
1187 * each device it was responsible for, and marks those devices as 1209 * each device it was responsible for, and marks those devices as
@@ -1203,7 +1225,7 @@ static struct pci_driver pci_compat_driver = {
1203 * pci_dev_driver - get the pci_driver of a device 1225 * pci_dev_driver - get the pci_driver of a device
1204 * @dev: the device to query 1226 * @dev: the device to query
1205 * 1227 *
1206 * Returns the appropriate pci_driver structure or %NULL if there is no 1228 * Returns the appropriate pci_driver structure or %NULL if there is no
1207 * registered driver for the device. 1229 * registered driver for the device.
1208 */ 1230 */
1209struct pci_driver * 1231struct pci_driver *
@@ -1224,7 +1246,7 @@ pci_dev_driver(const struct pci_dev *dev)
1224 * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure 1246 * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure
1225 * @dev: the PCI device structure to match against 1247 * @dev: the PCI device structure to match against
1226 * @drv: the device driver to search for matching PCI device id structures 1248 * @drv: the device driver to search for matching PCI device id structures
1227 * 1249 *
1228 * Used by a driver to check whether a PCI device present in the 1250 * Used by a driver to check whether a PCI device present in the
1229 * system is in its list of supported devices. Returns the matching 1251 * system is in its list of supported devices. Returns the matching
1230 * pci_device_id structure or %NULL if there is no match. 1252 * pci_device_id structure or %NULL if there is no match.
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index edaed6f4da6c..d51f45aa669e 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -263,7 +263,7 @@ device_has_dsm(struct device *dev)
263 acpi_handle handle; 263 acpi_handle handle;
264 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; 264 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
265 265
266 handle = DEVICE_ACPI_HANDLE(dev); 266 handle = ACPI_HANDLE(dev);
267 267
268 if (!handle) 268 if (!handle)
269 return FALSE; 269 return FALSE;
@@ -295,7 +295,7 @@ acpilabel_show(struct device *dev, struct device_attribute *attr, char *buf)
295 acpi_handle handle; 295 acpi_handle handle;
296 int length; 296 int length;
297 297
298 handle = DEVICE_ACPI_HANDLE(dev); 298 handle = ACPI_HANDLE(dev);
299 299
300 if (!handle) 300 if (!handle)
301 return -1; 301 return -1;
@@ -316,7 +316,7 @@ acpiindex_show(struct device *dev, struct device_attribute *attr, char *buf)
316 acpi_handle handle; 316 acpi_handle handle;
317 int length; 317 int length;
318 318
319 handle = DEVICE_ACPI_HANDLE(dev); 319 handle = ACPI_HANDLE(dev);
320 320
321 if (!handle) 321 if (!handle)
322 return -1; 322 return -1;
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index 6e47c519c510..2ff77509d8e5 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -2,13 +2,13 @@
2 * 2 *
3 * Copyright (C) 2008 Red Hat, Inc. 3 * Copyright (C) 2008 Red Hat, Inc.
4 * Author: 4 * Author:
5 * Chris Wright 5 * Chris Wright
6 * 6 *
7 * This work is licensed under the terms of the GNU GPL, version 2. 7 * This work is licensed under the terms of the GNU GPL, version 2.
8 * 8 *
9 * Usage is simple, allocate a new id to the stub driver and bind the 9 * Usage is simple, allocate a new id to the stub driver and bind the
10 * device to it. For example: 10 * device to it. For example:
11 * 11 *
12 * # echo "8086 10f5" > /sys/bus/pci/drivers/pci-stub/new_id 12 * # echo "8086 10f5" > /sys/bus/pci/drivers/pci-stub/new_id
13 * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind 13 * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind
14 * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/pci-stub/bind 14 * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/pci-stub/bind
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 2aaa83c85a4e..c91e6c18debc 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -10,7 +10,7 @@
10 * 10 *
11 * File attributes for PCI devices 11 * File attributes for PCI devices
12 * 12 *
13 * Modeled after usb's driverfs.c 13 * Modeled after usb's driverfs.c
14 * 14 *
15 */ 15 */
16 16
@@ -270,13 +270,17 @@ msi_bus_store(struct device *dev, struct device_attribute *attr,
270 if (kstrtoul(buf, 0, &val) < 0) 270 if (kstrtoul(buf, 0, &val) < 0)
271 return -EINVAL; 271 return -EINVAL;
272 272
273 /* bad things may happen if the no_msi flag is changed 273 /*
274 * while some drivers are loaded */ 274 * Bad things may happen if the no_msi flag is changed
275 * while drivers are loaded.
276 */
275 if (!capable(CAP_SYS_ADMIN)) 277 if (!capable(CAP_SYS_ADMIN))
276 return -EPERM; 278 return -EPERM;
277 279
278 /* Maybe pci devices without subordinate busses shouldn't even have this 280 /*
279 * attribute in the first place? */ 281 * Maybe devices without subordinate buses shouldn't have this
282 * attribute in the first place?
283 */
280 if (!pdev->subordinate) 284 if (!pdev->subordinate)
281 return count; 285 return count;
282 286
@@ -670,7 +674,7 @@ pci_write_config(struct file* filp, struct kobject *kobj,
670 size = dev->cfg_size - off; 674 size = dev->cfg_size - off;
671 count = size; 675 count = size;
672 } 676 }
673 677
674 pci_config_pm_runtime_get(dev); 678 pci_config_pm_runtime_get(dev);
675 679
676 if ((off & 1) && size) { 680 if ((off & 1) && size) {
@@ -678,7 +682,7 @@ pci_write_config(struct file* filp, struct kobject *kobj,
678 off++; 682 off++;
679 size--; 683 size--;
680 } 684 }
681 685
682 if ((off & 3) && size > 2) { 686 if ((off & 3) && size > 2) {
683 u16 val = data[off - init_off]; 687 u16 val = data[off - init_off];
684 val |= (u16) data[off - init_off + 1] << 8; 688 val |= (u16) data[off - init_off + 1] << 8;
@@ -696,7 +700,7 @@ pci_write_config(struct file* filp, struct kobject *kobj,
696 off += 4; 700 off += 4;
697 size -= 4; 701 size -= 4;
698 } 702 }
699 703
700 if (size >= 2) { 704 if (size >= 2) {
701 u16 val = data[off - init_off]; 705 u16 val = data[off - init_off];
702 val |= (u16) data[off - init_off + 1] << 8; 706 val |= (u16) data[off - init_off + 1] << 8;
@@ -1229,21 +1233,21 @@ pci_read_rom(struct file *filp, struct kobject *kobj,
1229 1233
1230 if (!pdev->rom_attr_enabled) 1234 if (!pdev->rom_attr_enabled)
1231 return -EINVAL; 1235 return -EINVAL;
1232 1236
1233 rom = pci_map_rom(pdev, &size); /* size starts out as PCI window size */ 1237 rom = pci_map_rom(pdev, &size); /* size starts out as PCI window size */
1234 if (!rom || !size) 1238 if (!rom || !size)
1235 return -EIO; 1239 return -EIO;
1236 1240
1237 if (off >= size) 1241 if (off >= size)
1238 count = 0; 1242 count = 0;
1239 else { 1243 else {
1240 if (off + count > size) 1244 if (off + count > size)
1241 count = size - off; 1245 count = size - off;
1242 1246
1243 memcpy_fromio(buf, rom + off, count); 1247 memcpy_fromio(buf, rom + off, count);
1244 } 1248 }
1245 pci_unmap_rom(pdev, rom); 1249 pci_unmap_rom(pdev, rom);
1246 1250
1247 return count; 1251 return count;
1248} 1252}
1249 1253
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index b127fbda6fc8..07369f32e8bb 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -198,7 +198,7 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus,
198} 198}
199 199
200/** 200/**
201 * pci_find_capability - query for devices' capabilities 201 * pci_find_capability - query for devices' capabilities
202 * @dev: PCI device to query 202 * @dev: PCI device to query
203 * @cap: capability code 203 * @cap: capability code
204 * 204 *
@@ -207,12 +207,12 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus,
207 * device's PCI configuration space or 0 in case the device does not 207 * device's PCI configuration space or 0 in case the device does not
208 * support it. Possible values for @cap: 208 * support it. Possible values for @cap:
209 * 209 *
210 * %PCI_CAP_ID_PM Power Management 210 * %PCI_CAP_ID_PM Power Management
211 * %PCI_CAP_ID_AGP Accelerated Graphics Port 211 * %PCI_CAP_ID_AGP Accelerated Graphics Port
212 * %PCI_CAP_ID_VPD Vital Product Data 212 * %PCI_CAP_ID_VPD Vital Product Data
213 * %PCI_CAP_ID_SLOTID Slot Identification 213 * %PCI_CAP_ID_SLOTID Slot Identification
214 * %PCI_CAP_ID_MSI Message Signalled Interrupts 214 * %PCI_CAP_ID_MSI Message Signalled Interrupts
215 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap 215 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
216 * %PCI_CAP_ID_PCIX PCI-X 216 * %PCI_CAP_ID_PCIX PCI-X
217 * %PCI_CAP_ID_EXP PCI Express 217 * %PCI_CAP_ID_EXP PCI Express
218 */ 218 */
@@ -228,13 +228,13 @@ int pci_find_capability(struct pci_dev *dev, int cap)
228} 228}
229 229
230/** 230/**
231 * pci_bus_find_capability - query for devices' capabilities 231 * pci_bus_find_capability - query for devices' capabilities
232 * @bus: the PCI bus to query 232 * @bus: the PCI bus to query
233 * @devfn: PCI device to query 233 * @devfn: PCI device to query
234 * @cap: capability code 234 * @cap: capability code
235 * 235 *
236 * Like pci_find_capability() but works for pci devices that do not have a 236 * Like pci_find_capability() but works for pci devices that do not have a
237 * pci_dev structure set up yet. 237 * pci_dev structure set up yet.
238 * 238 *
239 * Returns the address of the requested capability structure within the 239 * Returns the address of the requested capability structure within the
240 * device's PCI configuration space or 0 in case the device does not 240 * device's PCI configuration space or 0 in case the device does not
@@ -515,7 +515,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
515 return -EINVAL; 515 return -EINVAL;
516 516
517 /* Validate current state: 517 /* Validate current state:
518 * Can enter D0 from any state, but if we can only go deeper 518 * Can enter D0 from any state, but if we can only go deeper
519 * to sleep if we're already in a low power state 519 * to sleep if we're already in a low power state
520 */ 520 */
521 if (state != PCI_D0 && dev->current_state <= PCI_D3cold 521 if (state != PCI_D0 && dev->current_state <= PCI_D3cold
@@ -998,7 +998,7 @@ static void pci_restore_config_space(struct pci_dev *pdev)
998 } 998 }
999} 999}
1000 1000
1001/** 1001/**
1002 * pci_restore_state - Restore the saved state of a PCI device 1002 * pci_restore_state - Restore the saved state of a PCI device
1003 * @dev: - PCI device that we're dealing with 1003 * @dev: - PCI device that we're dealing with
1004 */ 1004 */
@@ -1030,7 +1030,7 @@ struct pci_saved_state {
1030 * the device saved state. 1030 * the device saved state.
1031 * @dev: PCI device that we're dealing with 1031 * @dev: PCI device that we're dealing with
1032 * 1032 *
1033 * Rerturn NULL if no state or error. 1033 * Return NULL if no state or error.
1034 */ 1034 */
1035struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev) 1035struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1036{ 1036{
@@ -1880,7 +1880,7 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
1880 * pci_dev_run_wake - Check if device can generate run-time wake-up events. 1880 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1881 * @dev: Device to check. 1881 * @dev: Device to check.
1882 * 1882 *
1883 * Return true if the device itself is cabable of generating wake-up events 1883 * Return true if the device itself is capable of generating wake-up events
1884 * (through the platform or using the native PCIe PME) or if the device supports 1884 * (through the platform or using the native PCIe PME) or if the device supports
1885 * PME and one of its upstream bridges can generate wake-up events. 1885 * PME and one of its upstream bridges can generate wake-up events.
1886 */ 1886 */
@@ -2447,7 +2447,7 @@ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2447 switch (pci_pcie_type(pdev)) { 2447 switch (pci_pcie_type(pdev)) {
2448 /* 2448 /*
2449 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec, 2449 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
2450 * but since their primary inteface is PCI/X, we conservatively 2450 * but since their primary interface is PCI/X, we conservatively
2451 * handle them as we would a non-PCIe device. 2451 * handle them as we would a non-PCIe device.
2452 */ 2452 */
2453 case PCI_EXP_TYPE_PCIE_BRIDGE: 2453 case PCI_EXP_TYPE_PCIE_BRIDGE:
@@ -2471,7 +2471,7 @@ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2471 /* 2471 /*
2472 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be 2472 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
2473 * implemented by the remaining PCIe types to indicate peer-to-peer 2473 * implemented by the remaining PCIe types to indicate peer-to-peer
2474 * capabilities, but only when they are part of a multifunciton 2474 * capabilities, but only when they are part of a multifunction
2475 * device. The footnote for section 6.12 indicates the specific 2475 * device. The footnote for section 6.12 indicates the specific
2476 * PCIe types included here. 2476 * PCIe types included here.
2477 */ 2477 */
@@ -2486,7 +2486,7 @@ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2486 } 2486 }
2487 2487
2488 /* 2488 /*
2489 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilties are applicable 2489 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
2490 * to single function devices with the exception of downstream ports. 2490 * to single function devices with the exception of downstream ports.
2491 */ 2491 */
2492 return true; 2492 return true;
@@ -2622,7 +2622,7 @@ void pci_release_region(struct pci_dev *pdev, int bar)
2622 * 2622 *
2623 * If @exclusive is set, then the region is marked so that userspace 2623 * If @exclusive is set, then the region is marked so that userspace
2624 * is explicitly not allowed to map the resource via /dev/mem or 2624 * is explicitly not allowed to map the resource via /dev/mem or
2625 * sysfs MMIO access. 2625 * sysfs MMIO access.
2626 * 2626 *
2627 * Returns 0 on success, or %EBUSY on error. A warning 2627 * Returns 0 on success, or %EBUSY on error. A warning
2628 * message is also printed on failure. 2628 * message is also printed on failure.
@@ -2634,7 +2634,7 @@ static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_n
2634 2634
2635 if (pci_resource_len(pdev, bar) == 0) 2635 if (pci_resource_len(pdev, bar) == 0)
2636 return 0; 2636 return 0;
2637 2637
2638 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) { 2638 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2639 if (!request_region(pci_resource_start(pdev, bar), 2639 if (!request_region(pci_resource_start(pdev, bar),
2640 pci_resource_len(pdev, bar), res_name)) 2640 pci_resource_len(pdev, bar), res_name))
@@ -2694,7 +2694,7 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2694 * 2694 *
2695 * The key difference that _exclusive makes it that userspace is 2695 * The key difference that _exclusive makes it that userspace is
2696 * explicitly not allowed to map the resource via /dev/mem or 2696 * explicitly not allowed to map the resource via /dev/mem or
2697 * sysfs. 2697 * sysfs.
2698 */ 2698 */
2699int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name) 2699int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2700{ 2700{
@@ -2799,7 +2799,7 @@ int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2799 * successfully. 2799 * successfully.
2800 * 2800 *
2801 * pci_request_regions_exclusive() will mark the region so that 2801 * pci_request_regions_exclusive() will mark the region so that
2802 * /dev/mem and the sysfs MMIO access will not be allowed. 2802 * /dev/mem and the sysfs MMIO access will not be allowed.
2803 * 2803 *
2804 * Returns 0 on success, or %EBUSY on error. A warning 2804 * Returns 0 on success, or %EBUSY on error. A warning
2805 * message is also printed on failure. 2805 * message is also printed on failure.
@@ -2967,7 +2967,7 @@ pci_set_mwi(struct pci_dev *dev)
2967 cmd |= PCI_COMMAND_INVALIDATE; 2967 cmd |= PCI_COMMAND_INVALIDATE;
2968 pci_write_config_word(dev, PCI_COMMAND, cmd); 2968 pci_write_config_word(dev, PCI_COMMAND, cmd);
2969 } 2969 }
2970 2970
2971 return 0; 2971 return 0;
2972} 2972}
2973 2973
@@ -3292,7 +3292,7 @@ clear:
3292 * 3292 *
3293 * NOTE: This causes the caller to sleep for twice the device power transition 3293 * NOTE: This causes the caller to sleep for twice the device power transition
3294 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms 3294 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3295 * by devault (i.e. unless the @dev's d3_delay field has a different value). 3295 * by default (i.e. unless the @dev's d3_delay field has a different value).
3296 * Moreover, only devices in D0 can be reset by this function. 3296 * Moreover, only devices in D0 can be reset by this function.
3297 */ 3297 */
3298static int pci_pm_reset(struct pci_dev *dev, int probe) 3298static int pci_pm_reset(struct pci_dev *dev, int probe)
@@ -3341,7 +3341,7 @@ void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
3341 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); 3341 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
3342 /* 3342 /*
3343 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double 3343 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
3344 * this to 2ms to ensure that we meet the minium requirement. 3344 * this to 2ms to ensure that we meet the minimum requirement.
3345 */ 3345 */
3346 msleep(2); 3346 msleep(2);
3347 3347
@@ -3998,7 +3998,7 @@ int pcie_set_mps(struct pci_dev *dev, int mps)
3998 return -EINVAL; 3998 return -EINVAL;
3999 3999
4000 v = ffs(mps) - 8; 4000 v = ffs(mps) - 8;
4001 if (v > dev->pcie_mpss) 4001 if (v > dev->pcie_mpss)
4002 return -EINVAL; 4002 return -EINVAL;
4003 v <<= 5; 4003 v <<= 5;
4004 4004
@@ -4165,6 +4165,14 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
4165 return 0; 4165 return 0;
4166} 4166}
4167 4167
4168bool pci_device_is_present(struct pci_dev *pdev)
4169{
4170 u32 v;
4171
4172 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
4173}
4174EXPORT_SYMBOL_GPL(pci_device_is_present);
4175
4168#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE 4176#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
4169static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; 4177static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
4170static DEFINE_SPINLOCK(resource_alignment_lock); 4178static DEFINE_SPINLOCK(resource_alignment_lock);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 6b3a958e1be6..b2c8881da764 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -525,7 +525,7 @@ static void handle_error_source(struct pcie_device *aerdev,
525 525
526 if (info->severity == AER_CORRECTABLE) { 526 if (info->severity == AER_CORRECTABLE) {
527 /* 527 /*
528 * Correctable error does not need software intevention. 528 * Correctable error does not need software intervention.
529 * No need to go through error recovery process. 529 * No need to go through error recovery process.
530 */ 530 */
531 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 531 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 403a44374ed5..f1272dc54de1 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -548,7 +548,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
548 548
549/* 549/*
550 * pcie_aspm_init_link_state: Initiate PCI express link state. 550 * pcie_aspm_init_link_state: Initiate PCI express link state.
551 * It is called after the pcie and its children devices are scaned. 551 * It is called after the pcie and its children devices are scanned.
552 * @pdev: the root port or switch downstream port 552 * @pdev: the root port or switch downstream port
553 */ 553 */
554void pcie_aspm_init_link_state(struct pci_dev *pdev) 554void pcie_aspm_init_link_state(struct pci_dev *pdev)
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index e56e594ce112..bbc3bdd2b189 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -419,8 +419,8 @@ static void pcie_pme_remove(struct pcie_device *srv)
419 419
420static struct pcie_port_service_driver pcie_pme_driver = { 420static struct pcie_port_service_driver pcie_pme_driver = {
421 .name = "pcie_pme", 421 .name = "pcie_pme",
422 .port_type = PCI_EXP_TYPE_ROOT_PORT, 422 .port_type = PCI_EXP_TYPE_ROOT_PORT,
423 .service = PCIE_PORT_SERVICE_PME, 423 .service = PCIE_PORT_SERVICE_PME,
424 424
425 .probe = pcie_pme_probe, 425 .probe = pcie_pme_probe,
426 .suspend = pcie_pme_suspend, 426 .suspend = pcie_pme_suspend,
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index d2eb80aab569..d525548404d6 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -14,7 +14,7 @@
14#define PCIE_PORT_DEVICE_MAXSERVICES 4 14#define PCIE_PORT_DEVICE_MAXSERVICES 4
15/* 15/*
16 * According to the PCI Express Base Specification 2.0, the indices of 16 * According to the PCI Express Base Specification 2.0, the indices of
17 * the MSI-X table entires used by port services must not exceed 31 17 * the MSI-X table entries used by port services must not exceed 31
18 */ 18 */
19#define PCIE_PORT_MAX_MSIX_ENTRIES 32 19#define PCIE_PORT_MAX_MSIX_ENTRIES 32
20 20
diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c
index 67be55a7f260..87e79a6ffb5a 100644
--- a/drivers/pci/pcie/portdrv_bus.c
+++ b/drivers/pci/pcie/portdrv_bus.c
@@ -18,8 +18,8 @@
18static int pcie_port_bus_match(struct device *dev, struct device_driver *drv); 18static int pcie_port_bus_match(struct device *dev, struct device_driver *drv);
19 19
20struct bus_type pcie_port_bus_type = { 20struct bus_type pcie_port_bus_type = {
21 .name = "pci_express", 21 .name = "pci_express",
22 .match = pcie_port_bus_match, 22 .match = pcie_port_bus_match,
23}; 23};
24EXPORT_SYMBOL_GPL(pcie_port_bus_type); 24EXPORT_SYMBOL_GPL(pcie_port_bus_type);
25 25
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 08d131f7815b..0b6e76604068 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -46,7 +46,7 @@ static void release_pcie_device(struct device *dev)
46 * pcie_port_msix_add_entry - add entry to given array of MSI-X entries 46 * pcie_port_msix_add_entry - add entry to given array of MSI-X entries
47 * @entries: Array of MSI-X entries 47 * @entries: Array of MSI-X entries
48 * @new_entry: Index of the entry to add to the array 48 * @new_entry: Index of the entry to add to the array
49 * @nr_entries: Number of entries aleady in the array 49 * @nr_entries: Number of entries already in the array
50 * 50 *
51 * Return value: Position of the added entry in the array 51 * Return value: Position of the added entry in the array
52 */ 52 */
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 696caed5fdf5..0d8fdc48e642 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -223,7 +223,6 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
223static void pcie_portdrv_remove(struct pci_dev *dev) 223static void pcie_portdrv_remove(struct pci_dev *dev)
224{ 224{
225 pcie_port_device_remove(dev); 225 pcie_port_device_remove(dev);
226 pci_disable_device(dev);
227} 226}
228 227
229static int error_detected_iter(struct device *device, void *data) 228static int error_detected_iter(struct device *device, void *data)
@@ -390,9 +389,9 @@ static struct pci_driver pcie_portdriver = {
390 .probe = pcie_portdrv_probe, 389 .probe = pcie_portdrv_probe,
391 .remove = pcie_portdrv_remove, 390 .remove = pcie_portdrv_remove,
392 391
393 .err_handler = &pcie_portdrv_err_handler, 392 .err_handler = &pcie_portdrv_err_handler,
394 393
395 .driver.pm = PCIE_PORTDRV_PM_OPS, 394 .driver.pm = PCIE_PORTDRV_PM_OPS,
396}; 395};
397 396
398static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d) 397static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
@@ -412,7 +411,7 @@ static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = {
412 .ident = "MSI Wind U-100", 411 .ident = "MSI Wind U-100",
413 .matches = { 412 .matches = {
414 DMI_MATCH(DMI_SYS_VENDOR, 413 DMI_MATCH(DMI_SYS_VENDOR,
415 "MICRO-STAR INTERNATIONAL CO., LTD"), 414 "MICRO-STAR INTERNATIONAL CO., LTD"),
416 DMI_MATCH(DMI_PRODUCT_NAME, "U-100"), 415 DMI_MATCH(DMI_PRODUCT_NAME, "U-100"),
417 }, 416 },
418 }, 417 },
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 5e14f5a51357..38e403dddf6e 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -582,7 +582,7 @@ static enum pci_bus_speed agp_speed(int agp3, int agpstat)
582 index = 1; 582 index = 1;
583 else 583 else
584 goto out; 584 goto out;
585 585
586 if (agp3) { 586 if (agp3) {
587 index += 2; 587 index += 2;
588 if (index == 5) 588 if (index == 5)
@@ -789,7 +789,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
789 } 789 }
790 790
791 /* Disable MasterAbortMode during probing to avoid reporting 791 /* Disable MasterAbortMode during probing to avoid reporting
792 of bus errors (in some architectures) */ 792 of bus errors (in some architectures) */
793 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl); 793 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
794 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, 794 pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
795 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT); 795 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
@@ -1005,7 +1005,7 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
1005 * pci_setup_device - fill in class and map information of a device 1005 * pci_setup_device - fill in class and map information of a device
1006 * @dev: the device structure to fill 1006 * @dev: the device structure to fill
1007 * 1007 *
1008 * Initialize the device structure with information about the device's 1008 * Initialize the device structure with information about the device's
1009 * vendor,class,memory and IO-space addresses,IRQ lines etc. 1009 * vendor,class,memory and IO-space addresses,IRQ lines etc.
1010 * Called at initialisation of the PCI subsystem and by CardBus services. 1010 * Called at initialisation of the PCI subsystem and by CardBus services.
1011 * Returns 0 on success and negative if unknown type of device (not normal, 1011 * Returns 0 on success and negative if unknown type of device (not normal,
@@ -1111,7 +1111,7 @@ int pci_setup_device(struct pci_dev *dev)
1111 goto bad; 1111 goto bad;
1112 /* The PCI-to-PCI bridge spec requires that subtractive 1112 /* The PCI-to-PCI bridge spec requires that subtractive
1113 decoding (i.e. transparent) bridge must have programming 1113 decoding (i.e. transparent) bridge must have programming
1114 interface code of 0x01. */ 1114 interface code of 0x01. */
1115 pci_read_irq(dev); 1115 pci_read_irq(dev);
1116 dev->transparent = ((dev->class & 0xff) == 1); 1116 dev->transparent = ((dev->class & 0xff) == 1);
1117 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); 1117 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
@@ -1570,7 +1570,7 @@ static void pcie_write_mrrs(struct pci_dev *dev)
1570 * subsequent read will verify if the value is acceptable or not. 1570 * subsequent read will verify if the value is acceptable or not.
1571 * If the MRRS value provided is not acceptable (e.g., too large), 1571 * If the MRRS value provided is not acceptable (e.g., too large),
1572 * shrink the value until it is acceptable to the HW. 1572 * shrink the value until it is acceptable to the HW.
1573 */ 1573 */
1574 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) { 1574 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
1575 rc = pcie_set_readrq(dev, mrrs); 1575 rc = pcie_set_readrq(dev, mrrs);
1576 if (!rc) 1576 if (!rc)
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index cdc7836d7e3d..46d1378f2e9e 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -222,7 +222,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
222 default: 222 default:
223 ret = -EINVAL; 223 ret = -EINVAL;
224 break; 224 break;
225 }; 225 }
226 226
227 return ret; 227 return ret;
228} 228}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 91490453c229..3a02717473ad 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -9,10 +9,6 @@
9 * 9 *
10 * Init/reset quirks for USB host controllers should be in the 10 * Init/reset quirks for USB host controllers should be in the
11 * USB quirks file, where their drivers can access reuse it. 11 * USB quirks file, where their drivers can access reuse it.
12 *
13 * The bridge optimization stuff has been removed. If you really
14 * have a silly BIOS which is unable to set your host bridge right,
15 * use the PowerTweak utility (see http://powertweak.sourceforge.net).
16 */ 12 */
17 13
18#include <linux/types.h> 14#include <linux/types.h>
@@ -55,7 +51,7 @@ static void quirk_mellanox_tavor(struct pci_dev *dev)
55DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor); 51DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor);
56DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor); 52DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor);
57 53
58/* Deal with broken BIOS'es that neglect to enable passive release, 54/* Deal with broken BIOSes that neglect to enable passive release,
59 which can cause problems in combination with the 82441FX/PPro MTRRs */ 55 which can cause problems in combination with the 82441FX/PPro MTRRs */
60static void quirk_passive_release(struct pci_dev *dev) 56static void quirk_passive_release(struct pci_dev *dev)
61{ 57{
@@ -78,11 +74,11 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_p
78 74
79/* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround 75/* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround
80 but VIA don't answer queries. If you happen to have good contacts at VIA 76 but VIA don't answer queries. If you happen to have good contacts at VIA
81 ask them for me please -- Alan 77 ask them for me please -- Alan
82 78
83 This appears to be BIOS not version dependent. So presumably there is a 79 This appears to be BIOS not version dependent. So presumably there is a
84 chipset level fix */ 80 chipset level fix */
85 81
86static void quirk_isa_dma_hangs(struct pci_dev *dev) 82static void quirk_isa_dma_hangs(struct pci_dev *dev)
87{ 83{
88 if (!isa_dma_bridge_buggy) { 84 if (!isa_dma_bridge_buggy) {
@@ -97,7 +93,7 @@ static void quirk_isa_dma_hangs(struct pci_dev *dev)
97DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs); 93DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs);
98DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs); 94DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs);
99DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs); 95DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs);
100DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs); 96DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs);
101DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs); 97DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs);
102DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs); 98DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
103DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); 99DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
@@ -157,10 +153,10 @@ static void quirk_triton(struct pci_dev *dev)
157 pci_pci_problems |= PCIPCI_TRITON; 153 pci_pci_problems |= PCIPCI_TRITON;
158 } 154 }
159} 155}
160DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton); 156DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton);
161DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton); 157DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton);
162DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton); 158DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton);
163DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton); 159DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);
164 160
165/* 161/*
166 * VIA Apollo KT133 needs PCI latency patch 162 * VIA Apollo KT133 needs PCI latency patch
@@ -171,7 +167,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quir
171 * the info on which Mr Breese based his work. 167 * the info on which Mr Breese based his work.
172 * 168 *
173 * Updated based on further information from the site and also on 169 * Updated based on further information from the site and also on
174 * information provided by VIA 170 * information provided by VIA
175 */ 171 */
176static void quirk_vialatency(struct pci_dev *dev) 172static void quirk_vialatency(struct pci_dev *dev)
177{ 173{
@@ -179,7 +175,7 @@ static void quirk_vialatency(struct pci_dev *dev)
179 u8 busarb; 175 u8 busarb;
180 /* Ok we have a potential problem chipset here. Now see if we have 176 /* Ok we have a potential problem chipset here. Now see if we have
181 a buggy southbridge */ 177 a buggy southbridge */
182 178
183 p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL); 179 p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
184 if (p!=NULL) { 180 if (p!=NULL) {
185 /* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */ 181 /* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */
@@ -194,9 +190,9 @@ static void quirk_vialatency(struct pci_dev *dev)
194 if (p->revision < 0x10 || p->revision > 0x12) 190 if (p->revision < 0x10 || p->revision > 0x12)
195 goto exit; 191 goto exit;
196 } 192 }
197 193
198 /* 194 /*
199 * Ok we have the problem. Now set the PCI master grant to 195 * Ok we have the problem. Now set the PCI master grant to
200 * occur every master grant. The apparent bug is that under high 196 * occur every master grant. The apparent bug is that under high
201 * PCI load (quite common in Linux of course) you can get data 197 * PCI load (quite common in Linux of course) you can get data
202 * loss when the CPU is held off the bus for 3 bus master requests 198 * loss when the CPU is held off the bus for 3 bus master requests
@@ -209,7 +205,7 @@ static void quirk_vialatency(struct pci_dev *dev)
209 */ 205 */
210 206
211 pci_read_config_byte(dev, 0x76, &busarb); 207 pci_read_config_byte(dev, 0x76, &busarb);
212 /* Set bit 4 and bi 5 of byte 76 to 0x01 208 /* Set bit 4 and bi 5 of byte 76 to 0x01
213 "Master priority rotation on every PCI master grant */ 209 "Master priority rotation on every PCI master grant */
214 busarb &= ~(1<<5); 210 busarb &= ~(1<<5);
215 busarb |= (1<<4); 211 busarb |= (1<<4);
@@ -252,7 +248,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx)
252 * that DMA to AGP space. Latency must be set to 0xA and triton 248 * that DMA to AGP space. Latency must be set to 0xA and triton
253 * workaround applied too 249 * workaround applied too
254 * [Info kindly provided by ALi] 250 * [Info kindly provided by ALi]
255 */ 251 */
256static void quirk_alimagik(struct pci_dev *dev) 252static void quirk_alimagik(struct pci_dev *dev)
257{ 253{
258 if ((pci_pci_problems&PCIPCI_ALIMAGIK)==0) { 254 if ((pci_pci_problems&PCIPCI_ALIMAGIK)==0) {
@@ -260,8 +256,8 @@ static void quirk_alimagik(struct pci_dev *dev)
260 pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON; 256 pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON;
261 } 257 }
262} 258}
263DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik); 259DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
264DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik); 260DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);
265 261
266/* 262/*
267 * Natoma has some interesting boundary conditions with Zoran stuff 263 * Natoma has some interesting boundary conditions with Zoran stuff
@@ -274,12 +270,12 @@ static void quirk_natoma(struct pci_dev *dev)
274 pci_pci_problems |= PCIPCI_NATOMA; 270 pci_pci_problems |= PCIPCI_NATOMA;
275 } 271 }
276} 272}
277DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma); 273DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma);
278DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma); 274DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma);
279DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma); 275DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma);
280DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma); 276DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma);
281DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma); 277DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma);
282DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma); 278DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);
283 279
284/* 280/*
285 * This chip can cause PCI parity errors if config register 0xA0 is read 281 * This chip can cause PCI parity errors if config register 0xA0 is read
@@ -400,7 +396,7 @@ static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int p
400 /* 396 /*
401 * For now we only print it out. Eventually we'll want to 397 * For now we only print it out. Eventually we'll want to
402 * reserve it (at least if it's in the 0x1000+ range), but 398 * reserve it (at least if it's in the 0x1000+ range), but
403 * let's get enough confirmation reports first. 399 * let's get enough confirmation reports first.
404 */ 400 */
405 base &= -size; 401 base &= -size;
406 dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1); 402 dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1);
@@ -425,7 +421,7 @@ static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int
425 } 421 }
426 /* 422 /*
427 * For now we only print it out. Eventually we'll want to 423 * For now we only print it out. Eventually we'll want to
428 * reserve it, but let's get enough confirmation reports first. 424 * reserve it, but let's get enough confirmation reports first.
429 */ 425 */
430 base &= -size; 426 base &= -size;
431 dev_info(&dev->dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1); 427 dev_info(&dev->dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1);
@@ -682,7 +678,7 @@ static void quirk_xio2000a(struct pci_dev *dev)
682DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A, 678DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
683 quirk_xio2000a); 679 quirk_xio2000a);
684 680
685#ifdef CONFIG_X86_IO_APIC 681#ifdef CONFIG_X86_IO_APIC
686 682
687#include <asm/io_apic.h> 683#include <asm/io_apic.h>
688 684
@@ -696,12 +692,12 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
696static void quirk_via_ioapic(struct pci_dev *dev) 692static void quirk_via_ioapic(struct pci_dev *dev)
697{ 693{
698 u8 tmp; 694 u8 tmp;
699 695
700 if (nr_ioapics < 1) 696 if (nr_ioapics < 1)
701 tmp = 0; /* nothing routed to external APIC */ 697 tmp = 0; /* nothing routed to external APIC */
702 else 698 else
703 tmp = 0x1f; /* all known bits (4-0) routed to external APIC */ 699 tmp = 0x1f; /* all known bits (4-0) routed to external APIC */
704 700
705 dev_info(&dev->dev, "%sbling VIA external APIC routing\n", 701 dev_info(&dev->dev, "%sbling VIA external APIC routing\n",
706 tmp == 0 ? "Disa" : "Ena"); 702 tmp == 0 ? "Disa" : "Ena");
707 703
@@ -712,7 +708,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_i
712DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); 708DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
713 709
714/* 710/*
715 * VIA 8237: Some BIOSs don't set the 'Bypass APIC De-Assert Message' Bit. 711 * VIA 8237: Some BIOSes don't set the 'Bypass APIC De-Assert Message' Bit.
716 * This leads to doubled level interrupt rates. 712 * This leads to doubled level interrupt rates.
717 * Set this bit to get rid of cycle wastage. 713 * Set this bit to get rid of cycle wastage.
718 * Otherwise uncritical. 714 * Otherwise uncritical.
@@ -986,7 +982,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, qu
986static void quirk_disable_pxb(struct pci_dev *pdev) 982static void quirk_disable_pxb(struct pci_dev *pdev)
987{ 983{
988 u16 config; 984 u16 config;
989 985
990 if (pdev->revision != 0x04) /* Only C0 requires this */ 986 if (pdev->revision != 0x04) /* Only C0 requires this */
991 return; 987 return;
992 pci_read_config_word(pdev, 0x40, &config); 988 pci_read_config_word(pdev, 0x40, &config);
@@ -1094,11 +1090,11 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_e
1094 * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge 1090 * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge
1095 * is not activated. The myth is that Asus said that they do not want the 1091 * is not activated. The myth is that Asus said that they do not want the
1096 * users to be irritated by just another PCI Device in the Win98 device 1092 * users to be irritated by just another PCI Device in the Win98 device
1097 * manager. (see the file prog/hotplug/README.p4b in the lm_sensors 1093 * manager. (see the file prog/hotplug/README.p4b in the lm_sensors
1098 * package 2.7.0 for details) 1094 * package 2.7.0 for details)
1099 * 1095 *
1100 * The SMBus PCI Device can be activated by setting a bit in the ICH LPC 1096 * The SMBus PCI Device can be activated by setting a bit in the ICH LPC
1101 * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it 1097 * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it
1102 * becomes necessary to do this tweak in two steps -- the chosen trigger 1098 * becomes necessary to do this tweak in two steps -- the chosen trigger
1103 * is either the Host bridge (preferred) or on-board VGA controller. 1099 * is either the Host bridge (preferred) or on-board VGA controller.
1104 * 1100 *
@@ -1253,7 +1249,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asu
1253static void asus_hides_smbus_lpc(struct pci_dev *dev) 1249static void asus_hides_smbus_lpc(struct pci_dev *dev)
1254{ 1250{
1255 u16 val; 1251 u16 val;
1256 1252
1257 if (likely(!asus_hides_smbus)) 1253 if (likely(!asus_hides_smbus))
1258 return; 1254 return;
1259 1255
@@ -1640,8 +1636,8 @@ static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
1640 dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", 1636 dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
1641 dev->vendor, dev->device); 1637 dev->vendor, dev->device);
1642} 1638}
1643DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); 1639DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
1644DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); 1640DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
1645 1641
1646/* 1642/*
1647 * disable boot interrupts on HT-1000 1643 * disable boot interrupts on HT-1000
@@ -1673,8 +1669,8 @@ static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
1673 dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", 1669 dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
1674 dev->vendor, dev->device); 1670 dev->vendor, dev->device);
1675} 1671}
1676DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); 1672DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
1677DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); 1673DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
1678 1674
1679/* 1675/*
1680 * disable boot interrupts on AMD and ATI chipsets 1676 * disable boot interrupts on AMD and ATI chipsets
@@ -1730,8 +1726,8 @@ static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
1730 dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", 1726 dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
1731 dev->vendor, dev->device); 1727 dev->vendor, dev->device);
1732} 1728}
1733DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); 1729DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
1734DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); 1730DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
1735#endif /* CONFIG_X86_IO_APIC */ 1731#endif /* CONFIG_X86_IO_APIC */
1736 1732
1737/* 1733/*
@@ -2127,8 +2123,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1);
2127#ifdef CONFIG_PCI_MSI 2123#ifdef CONFIG_PCI_MSI
2128/* Some chipsets do not support MSI. We cannot easily rely on setting 2124/* Some chipsets do not support MSI. We cannot easily rely on setting
2129 * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually 2125 * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
2130 * some other busses controlled by the chipset even if Linux is not 2126 * some other buses controlled by the chipset even if Linux is not
2131 * aware of it. Instead of setting the flag on all busses in the 2127 * aware of it. Instead of setting the flag on all buses in the
2132 * machine, simply disable MSI globally. 2128 * machine, simply disable MSI globally.
2133 */ 2129 */
2134static void quirk_disable_all_msi(struct pci_dev *dev) 2130static void quirk_disable_all_msi(struct pci_dev *dev)
@@ -2288,14 +2284,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2288 nvenet_msi_disable); 2284 nvenet_msi_disable);
2289 2285
2290/* 2286/*
2291 * Some versions of the MCP55 bridge from nvidia have a legacy irq routing 2287 * Some versions of the MCP55 bridge from Nvidia have a legacy IRQ routing
2292 * config register. This register controls the routing of legacy interrupts 2288 * config register. This register controls the routing of legacy
2293 * from devices that route through the MCP55. If this register is misprogramed 2289 * interrupts from devices that route through the MCP55. If this register
2294 * interrupts are only sent to the bsp, unlike conventional systems where the 2290 * is misprogrammed, interrupts are only sent to the BSP, unlike
2295 * irq is broadxast to all online cpus. Not having this register set 2291 * conventional systems where the IRQ is broadcast to all online CPUs. Not
2296 * properly prevents kdump from booting up properly, so lets make sure that 2292 * having this register set properly prevents kdump from booting up
2297 * we have it set correctly. 2293 * properly, so let's make sure that we have it set correctly.
2298 * Note this is an undocumented register. 2294 * Note that this is an undocumented register.
2299 */ 2295 */
2300static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev) 2296static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
2301{ 2297{
@@ -2626,7 +2622,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
2626/* Allow manual resource allocation for PCI hotplug bridges 2622/* Allow manual resource allocation for PCI hotplug bridges
2627 * via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For 2623 * via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For
2628 * some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6), 2624 * some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6),
2629 * kernel fails to allocate resources when hotplug device is 2625 * kernel fails to allocate resources when hotplug device is
2630 * inserted and PCI bus is rescanned. 2626 * inserted and PCI bus is rescanned.
2631 */ 2627 */
2632static void quirk_hotplug_bridge(struct pci_dev *dev) 2628static void quirk_hotplug_bridge(struct pci_dev *dev)
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 8fc54b7327bc..cc9337a71529 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -7,7 +7,7 @@ static void pci_free_resources(struct pci_dev *dev)
7{ 7{
8 int i; 8 int i;
9 9
10 msi_remove_pci_irq_vectors(dev); 10 msi_remove_pci_irq_vectors(dev);
11 11
12 pci_cleanup_rom(dev); 12 pci_cleanup_rom(dev);
13 for (i = 0; i < PCI_NUM_RESOURCES; i++) { 13 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
@@ -24,7 +24,7 @@ static void pci_stop_dev(struct pci_dev *dev)
24 if (dev->is_added) { 24 if (dev->is_added) {
25 pci_proc_detach_device(dev); 25 pci_proc_detach_device(dev);
26 pci_remove_sysfs_dev_files(dev); 26 pci_remove_sysfs_dev_files(dev);
27 device_del(&dev->dev); 27 device_release_driver(&dev->dev);
28 dev->is_added = 0; 28 dev->is_added = 0;
29 } 29 }
30 30
@@ -34,6 +34,8 @@ static void pci_stop_dev(struct pci_dev *dev)
34 34
35static void pci_destroy_dev(struct pci_dev *dev) 35static void pci_destroy_dev(struct pci_dev *dev)
36{ 36{
37 device_del(&dev->dev);
38
37 down_write(&pci_bus_sem); 39 down_write(&pci_bus_sem);
38 list_del(&dev->bus_list); 40 list_del(&dev->bus_list);
39 up_write(&pci_bus_sem); 41 up_write(&pci_bus_sem);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index d0627fa9f368..3ff2ac7c14e2 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * PCI searching functions. 2 * PCI searching functions.
3 * 3 *
4 * Copyright (C) 1993 -- 1997 Drew Eckhardt, Frederic Potter, 4 * Copyright (C) 1993 -- 1997 Drew Eckhardt, Frederic Potter,
5 * David Mosberger-Tang 5 * David Mosberger-Tang
@@ -96,12 +96,12 @@ struct pci_bus * pci_find_bus(int domain, int busnr)
96 * pci_find_next_bus - begin or continue searching for a PCI bus 96 * pci_find_next_bus - begin or continue searching for a PCI bus
97 * @from: Previous PCI bus found, or %NULL for new search. 97 * @from: Previous PCI bus found, or %NULL for new search.
98 * 98 *
99 * Iterates through the list of known PCI busses. A new search is 99 * Iterates through the list of known PCI buses. A new search is
100 * initiated by passing %NULL as the @from argument. Otherwise if 100 * initiated by passing %NULL as the @from argument. Otherwise if
101 * @from is not %NULL, searches continue from next device on the 101 * @from is not %NULL, searches continue from next device on the
102 * global list. 102 * global list.
103 */ 103 */
104struct pci_bus * 104struct pci_bus *
105pci_find_next_bus(const struct pci_bus *from) 105pci_find_next_bus(const struct pci_bus *from)
106{ 106{
107 struct list_head *n; 107 struct list_head *n;
@@ -119,11 +119,11 @@ pci_find_next_bus(const struct pci_bus *from)
119/** 119/**
120 * pci_get_slot - locate PCI device for a given PCI slot 120 * pci_get_slot - locate PCI device for a given PCI slot
121 * @bus: PCI bus on which desired PCI device resides 121 * @bus: PCI bus on which desired PCI device resides
122 * @devfn: encodes number of PCI slot in which the desired PCI 122 * @devfn: encodes number of PCI slot in which the desired PCI
123 * device resides and the logical device number within that slot 123 * device resides and the logical device number within that slot
124 * in case of multi-function devices. 124 * in case of multi-function devices.
125 * 125 *
126 * Given a PCI bus and slot/function number, the desired PCI device 126 * Given a PCI bus and slot/function number, the desired PCI device
127 * is located in the list of PCI devices. 127 * is located in the list of PCI devices.
128 * If the device is found, its reference count is increased and this 128 * If the device is found, its reference count is increased and this
129 * function returns a pointer to its data structure. The caller must 129 * function returns a pointer to its data structure. The caller must
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 4ce83b26ae9e..219a4106480a 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -292,8 +292,8 @@ static void assign_requested_resources_sorted(struct list_head *head,
292 (!(res->flags & IORESOURCE_ROM_ENABLE)))) 292 (!(res->flags & IORESOURCE_ROM_ENABLE))))
293 add_to_list(fail_head, 293 add_to_list(fail_head,
294 dev_res->dev, res, 294 dev_res->dev, res,
295 0 /* dont care */, 295 0 /* don't care */,
296 0 /* dont care */); 296 0 /* don't care */);
297 } 297 }
298 reset_resource(res); 298 reset_resource(res);
299 } 299 }
@@ -667,9 +667,9 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
667 if (!io) { 667 if (!io) {
668 pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0); 668 pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0);
669 pci_read_config_word(bridge, PCI_IO_BASE, &io); 669 pci_read_config_word(bridge, PCI_IO_BASE, &io);
670 pci_write_config_word(bridge, PCI_IO_BASE, 0x0); 670 pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
671 } 671 }
672 if (io) 672 if (io)
673 b_res[0].flags |= IORESOURCE_IO; 673 b_res[0].flags |= IORESOURCE_IO;
674 /* DECchip 21050 pass 2 errata: the bridge may miss an address 674 /* DECchip 21050 pass 2 errata: the bridge may miss an address
675 disconnect boundary by one PCI data phase. 675 disconnect boundary by one PCI data phase.
@@ -819,7 +819,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
819 resource_size_t min_align, align; 819 resource_size_t min_align, align;
820 820
821 if (!b_res) 821 if (!b_res)
822 return; 822 return;
823 823
824 min_align = window_alignment(bus, IORESOURCE_IO); 824 min_align = window_alignment(bus, IORESOURCE_IO);
825 list_for_each_entry(dev, &bus->devices, bus_list) { 825 list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -950,7 +950,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
950 if (realloc_head && i >= PCI_IOV_RESOURCES && 950 if (realloc_head && i >= PCI_IOV_RESOURCES &&
951 i <= PCI_IOV_RESOURCE_END) { 951 i <= PCI_IOV_RESOURCE_END) {
952 r->end = r->start - 1; 952 r->end = r->start - 1;
953 add_to_list(realloc_head, dev, r, r_size, 0/* dont' care */); 953 add_to_list(realloc_head, dev, r, r_size, 0/* don't care */);
954 children_add_size += r_size; 954 children_add_size += r_size;
955 continue; 955 continue;
956 } 956 }
@@ -1456,8 +1456,8 @@ static enum enable_type pci_realloc_detect(struct pci_bus *bus,
1456 1456
1457/* 1457/*
1458 * first try will not touch pci bridge res 1458 * first try will not touch pci bridge res
1459 * second and later try will clear small leaf bridge res 1459 * second and later try will clear small leaf bridge res
1460 * will stop till to the max deepth if can not find good one 1460 * will stop till to the max depth if can not find good one
1461 */ 1461 */
1462void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus) 1462void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
1463{ 1463{
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 07f2eddc09ce..83c4d3bc47ab 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -159,7 +159,7 @@ resource_size_t __weak pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx)
159 return 0; 159 return 0;
160} 160}
161 161
162static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, 162static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
163 int resno, resource_size_t size) 163 int resno, resource_size_t size)
164{ 164{
165 struct resource *root, *conflict; 165 struct resource *root, *conflict;
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index c1e9284a677b..448ca562d1f8 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -53,7 +53,7 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf)
53static const char *pci_bus_speed_strings[] = { 53static const char *pci_bus_speed_strings[] = {
54 "33 MHz PCI", /* 0x00 */ 54 "33 MHz PCI", /* 0x00 */
55 "66 MHz PCI", /* 0x01 */ 55 "66 MHz PCI", /* 0x01 */
56 "66 MHz PCI-X", /* 0x02 */ 56 "66 MHz PCI-X", /* 0x02 */
57 "100 MHz PCI-X", /* 0x03 */ 57 "100 MHz PCI-X", /* 0x03 */
58 "133 MHz PCI-X", /* 0x04 */ 58 "133 MHz PCI-X", /* 0x04 */
59 NULL, /* 0x05 */ 59 NULL, /* 0x05 */
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index e1c1ec540893..24750a1b39b6 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -44,7 +44,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
44 default: 44 default:
45 err = -EINVAL; 45 err = -EINVAL;
46 goto error; 46 goto error;
47 }; 47 }
48 48
49 err = -EIO; 49 err = -EIO;
50 if (cfg_ret != PCIBIOS_SUCCESSFUL) 50 if (cfg_ret != PCIBIOS_SUCCESSFUL)
diff --git a/drivers/pinctrl/pinctrl-abx500.c b/drivers/pinctrl/pinctrl-abx500.c
index 4780959e11d4..5183e7bb8de3 100644
--- a/drivers/pinctrl/pinctrl-abx500.c
+++ b/drivers/pinctrl/pinctrl-abx500.c
@@ -418,7 +418,7 @@ static int abx500_set_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
418 ret = abx500_gpio_set_bits(chip, 418 ret = abx500_gpio_set_bits(chip,
419 AB8500_GPIO_ALTFUN_REG, 419 AB8500_GPIO_ALTFUN_REG,
420 af.alt_bit1, 420 af.alt_bit1,
421 !!(af.alta_val && BIT(0))); 421 !!(af.alta_val & BIT(0)));
422 if (ret < 0) 422 if (ret < 0)
423 goto out; 423 goto out;
424 424
@@ -439,7 +439,7 @@ static int abx500_set_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
439 goto out; 439 goto out;
440 440
441 ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG, 441 ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG,
442 af.alt_bit1, !!(af.altb_val && BIT(0))); 442 af.alt_bit1, !!(af.altb_val & BIT(0)));
443 if (ret < 0) 443 if (ret < 0)
444 goto out; 444 goto out;
445 445
@@ -462,7 +462,7 @@ static int abx500_set_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
462 goto out; 462 goto out;
463 463
464 ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG, 464 ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG,
465 af.alt_bit2, !!(af.altc_val && BIT(1))); 465 af.alt_bit2, !!(af.altc_val & BIT(1)));
466 break; 466 break;
467 467
468 default: 468 default:
diff --git a/drivers/pinctrl/pinctrl-abx500.h b/drivers/pinctrl/pinctrl-abx500.h
index eeca8f973999..82293806e842 100644
--- a/drivers/pinctrl/pinctrl-abx500.h
+++ b/drivers/pinctrl/pinctrl-abx500.h
@@ -1,4 +1,4 @@
1#ifndef PINCTRL_PINCTRL_ABx5O0_H 1#ifndef PINCTRL_PINCTRL_ABx500_H
2#define PINCTRL_PINCTRL_ABx500_H 2#define PINCTRL_PINCTRL_ABx500_H
3 3
4/* Package definitions */ 4/* Package definitions */
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index e939c28cbf1f..46dddc159286 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -504,6 +504,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
504 data |= (3 << bit); 504 data |= (3 << bit);
505 break; 505 break;
506 default: 506 default:
507 spin_unlock_irqrestore(&bank->slock, flags);
507 dev_err(info->dev, "unsupported pull setting %d\n", 508 dev_err(info->dev, "unsupported pull setting %d\n",
508 pull); 509 pull);
509 return -EINVAL; 510 return -EINVAL;
@@ -1453,8 +1454,8 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
1453 if (ctrl->type == RK3188) { 1454 if (ctrl->type == RK3188) {
1454 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1455 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1455 info->reg_pull = devm_ioremap_resource(&pdev->dev, res); 1456 info->reg_pull = devm_ioremap_resource(&pdev->dev, res);
1456 if (IS_ERR(info->reg_base)) 1457 if (IS_ERR(info->reg_pull))
1457 return PTR_ERR(info->reg_base); 1458 return PTR_ERR(info->reg_pull);
1458 } 1459 }
1459 1460
1460 ret = rockchip_gpiolib_register(pdev, info); 1461 ret = rockchip_gpiolib_register(pdev, info);
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
index 009174d07767..bc5eb453a45c 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
@@ -3720,7 +3720,7 @@ static void __iomem *r8a7740_pinmux_portcr(struct sh_pfc *pfc, unsigned int pin)
3720 const struct r8a7740_portcr_group *group = 3720 const struct r8a7740_portcr_group *group =
3721 &r8a7740_portcr_offsets[i]; 3721 &r8a7740_portcr_offsets[i];
3722 3722
3723 if (i <= group->end_pin) 3723 if (pin <= group->end_pin)
3724 return pfc->window->virt + group->offset + pin; 3724 return pfc->window->virt + group->offset + pin;
3725 } 3725 }
3726 3726
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7372.c b/drivers/pinctrl/sh-pfc/pfc-sh7372.c
index 70b522d34821..cc097b693820 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7372.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7372.c
@@ -2584,7 +2584,7 @@ static void __iomem *sh7372_pinmux_portcr(struct sh_pfc *pfc, unsigned int pin)
2584 const struct sh7372_portcr_group *group = 2584 const struct sh7372_portcr_group *group =
2585 &sh7372_portcr_offsets[i]; 2585 &sh7372_portcr_offsets[i];
2586 2586
2587 if (i <= group->end_pin) 2587 if (pin <= group->end_pin)
2588 return pfc->window->virt + group->offset + pin; 2588 return pfc->window->virt + group->offset + pin;
2589 } 2589 }
2590 2590
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 69616aeaa966..09fde58b12e0 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -5,3 +5,4 @@ if GOLDFISH
5source "drivers/platform/goldfish/Kconfig" 5source "drivers/platform/goldfish/Kconfig"
6endif 6endif
7 7
8source "drivers/platform/chrome/Kconfig"
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index 8a44a4cd6d1e..3656b7b17b99 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -5,3 +5,4 @@
5obj-$(CONFIG_X86) += x86/ 5obj-$(CONFIG_X86) += x86/
6obj-$(CONFIG_OLPC) += olpc/ 6obj-$(CONFIG_OLPC) += olpc/
7obj-$(CONFIG_GOLDFISH) += goldfish/ 7obj-$(CONFIG_GOLDFISH) += goldfish/
8obj-$(CONFIG_CHROME_PLATFORMS) += chrome/
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
new file mode 100644
index 000000000000..b13303e75a34
--- /dev/null
+++ b/drivers/platform/chrome/Kconfig
@@ -0,0 +1,28 @@
1#
2# Platform support for Chrome OS hardware (Chromebooks and Chromeboxes)
3#
4
5menuconfig CHROME_PLATFORMS
6 bool "Platform support for Chrome hardware"
7 depends on X86
8 ---help---
9 Say Y here to get to see options for platform support for
10 various Chromebooks and Chromeboxes. This option alone does
11 not add any kernel code.
12
13 If you say N, all options in this submenu will be skipped and disabled.
14
15if CHROME_PLATFORMS
16
17config CHROMEOS_LAPTOP
18 tristate "Chrome OS Laptop"
19 depends on I2C
20 depends on DMI
21 ---help---
22 This driver instantiates i2c and smbus devices such as
23 light sensors and touchpads.
24
25 If you have a supported Chromebook, choose Y or M here.
26 The module will be called chromeos_laptop.
27
28endif # CHROME_PLATFORMS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
new file mode 100644
index 000000000000..015e9195e226
--- /dev/null
+++ b/drivers/platform/chrome/Makefile
@@ -0,0 +1,2 @@
1
2obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
diff --git a/drivers/platform/x86/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 3e5b4497a1d0..3e5b4497a1d0 100644
--- a/drivers/platform/x86/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index b51a7460cc49..d9dcd37b5a52 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -79,17 +79,6 @@ config ASUS_LAPTOP
79 79
80 If you have an ACPI-compatible ASUS laptop, say Y or M here. 80 If you have an ACPI-compatible ASUS laptop, say Y or M here.
81 81
82config CHROMEOS_LAPTOP
83 tristate "Chrome OS Laptop"
84 depends on I2C
85 depends on DMI
86 ---help---
87 This driver instantiates i2c and smbus devices such as
88 light sensors and touchpads.
89
90 If you have a supported Chromebook, choose Y or M here.
91 The module will be called chromeos_laptop.
92
93config DELL_LAPTOP 82config DELL_LAPTOP
94 tristate "Dell Laptop Extras" 83 tristate "Dell Laptop Extras"
95 depends on X86 84 depends on X86
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 5dbe19324351..f0e6aa407ffb 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -50,7 +50,6 @@ obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o
50obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o 50obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
51obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o 51obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o
52obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o 52obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o
53obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
54obj-$(CONFIG_INTEL_RST) += intel-rst.o 53obj-$(CONFIG_INTEL_RST) += intel-rst.o
55obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o 54obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o
56 55
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 605a9be55129..b9429fbf1cd8 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -519,7 +519,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
519 519
520 gmux_data->power_state = VGA_SWITCHEROO_ON; 520 gmux_data->power_state = VGA_SWITCHEROO_ON;
521 521
522 gmux_data->dhandle = DEVICE_ACPI_HANDLE(&pnp->dev); 522 gmux_data->dhandle = ACPI_HANDLE(&pnp->dev);
523 if (!gmux_data->dhandle) { 523 if (!gmux_data->dhandle) {
524 pr_err("Cannot find acpi handle for pnp device %s\n", 524 pr_err("Cannot find acpi handle for pnp device %s\n",
525 dev_name(&pnp->dev)); 525 dev_name(&pnp->dev));
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 0e9c169b42f8..594323a926cf 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -1494,10 +1494,9 @@ static int asus_input_init(struct asus_laptop *asus)
1494 int error; 1494 int error;
1495 1495
1496 input = input_allocate_device(); 1496 input = input_allocate_device();
1497 if (!input) { 1497 if (!input)
1498 pr_warn("Unable to allocate input device\n");
1499 return -ENOMEM; 1498 return -ENOMEM;
1500 } 1499
1501 input->name = "Asus Laptop extra buttons"; 1500 input->name = "Asus Laptop extra buttons";
1502 input->phys = ASUS_LAPTOP_FILE "/input0"; 1501 input->phys = ASUS_LAPTOP_FILE "/input0";
1503 input->id.bustype = BUS_HOST; 1502 input->id.bustype = BUS_HOST;
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index bb77e18b3dd4..c608b1d33f4a 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -21,6 +21,7 @@
21#include <linux/err.h> 21#include <linux/err.h>
22#include <linux/dmi.h> 22#include <linux/dmi.h>
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/rfkill.h>
24#include <linux/power_supply.h> 25#include <linux/power_supply.h>
25#include <linux/acpi.h> 26#include <linux/acpi.h>
26#include <linux/mm.h> 27#include <linux/mm.h>
@@ -89,6 +90,13 @@ static struct platform_driver platform_driver = {
89 90
90static struct platform_device *platform_device; 91static struct platform_device *platform_device;
91static struct backlight_device *dell_backlight_device; 92static struct backlight_device *dell_backlight_device;
93static struct rfkill *wifi_rfkill;
94static struct rfkill *bluetooth_rfkill;
95static struct rfkill *wwan_rfkill;
96static bool force_rfkill;
97
98module_param(force_rfkill, bool, 0444);
99MODULE_PARM_DESC(force_rfkill, "enable rfkill on non whitelisted models");
92 100
93static const struct dmi_system_id dell_device_table[] __initconst = { 101static const struct dmi_system_id dell_device_table[] __initconst = {
94 { 102 {
@@ -355,6 +363,108 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
355 return buffer; 363 return buffer;
356} 364}
357 365
366/* Derived from information in DellWirelessCtl.cpp:
367 Class 17, select 11 is radio control. It returns an array of 32-bit values.
368
369 Input byte 0 = 0: Wireless information
370
371 result[0]: return code
372 result[1]:
373 Bit 0: Hardware switch supported
374 Bit 1: Wifi locator supported
375 Bit 2: Wifi is supported
376 Bit 3: Bluetooth is supported
377 Bit 4: WWAN is supported
378 Bit 5: Wireless keyboard supported
379 Bits 6-7: Reserved
380 Bit 8: Wifi is installed
381 Bit 9: Bluetooth is installed
382 Bit 10: WWAN is installed
383 Bits 11-15: Reserved
384 Bit 16: Hardware switch is on
385 Bit 17: Wifi is blocked
386 Bit 18: Bluetooth is blocked
387 Bit 19: WWAN is blocked
388 Bits 20-31: Reserved
389 result[2]: NVRAM size in bytes
390 result[3]: NVRAM format version number
391
392 Input byte 0 = 2: Wireless switch configuration
393 result[0]: return code
394 result[1]:
395 Bit 0: Wifi controlled by switch
396 Bit 1: Bluetooth controlled by switch
397 Bit 2: WWAN controlled by switch
398 Bits 3-6: Reserved
399 Bit 7: Wireless switch config locked
400 Bit 8: Wifi locator enabled
401 Bits 9-14: Reserved
402 Bit 15: Wifi locator setting locked
403 Bits 16-31: Reserved
404*/
405
406static int dell_rfkill_set(void *data, bool blocked)
407{
408 int disable = blocked ? 1 : 0;
409 unsigned long radio = (unsigned long)data;
410 int hwswitch_bit = (unsigned long)data - 1;
411
412 get_buffer();
413 dell_send_request(buffer, 17, 11);
414
415 /* If the hardware switch controls this radio, and the hardware
416 switch is disabled, always disable the radio */
417 if ((hwswitch_state & BIT(hwswitch_bit)) &&
418 !(buffer->output[1] & BIT(16)))
419 disable = 1;
420
421 buffer->input[0] = (1 | (radio<<8) | (disable << 16));
422 dell_send_request(buffer, 17, 11);
423
424 release_buffer();
425 return 0;
426}
427
428/* Must be called with the buffer held */
429static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
430 int status)
431{
432 if (status & BIT(0)) {
433 /* Has hw-switch, sync sw_state to BIOS */
434 int block = rfkill_blocked(rfkill);
435 buffer->input[0] = (1 | (radio << 8) | (block << 16));
436 dell_send_request(buffer, 17, 11);
437 } else {
438 /* No hw-switch, sync BIOS state to sw_state */
439 rfkill_set_sw_state(rfkill, !!(status & BIT(radio + 16)));
440 }
441}
442
443static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio,
444 int status)
445{
446 if (hwswitch_state & (BIT(radio - 1)))
447 rfkill_set_hw_state(rfkill, !(status & BIT(16)));
448}
449
450static void dell_rfkill_query(struct rfkill *rfkill, void *data)
451{
452 int status;
453
454 get_buffer();
455 dell_send_request(buffer, 17, 11);
456 status = buffer->output[1];
457
458 dell_rfkill_update_hw_state(rfkill, (unsigned long)data, status);
459
460 release_buffer();
461}
462
463static const struct rfkill_ops dell_rfkill_ops = {
464 .set_block = dell_rfkill_set,
465 .query = dell_rfkill_query,
466};
467
358static struct dentry *dell_laptop_dir; 468static struct dentry *dell_laptop_dir;
359 469
360static int dell_debugfs_show(struct seq_file *s, void *data) 470static int dell_debugfs_show(struct seq_file *s, void *data)
@@ -424,6 +534,136 @@ static const struct file_operations dell_debugfs_fops = {
424 .release = single_release, 534 .release = single_release,
425}; 535};
426 536
537static void dell_update_rfkill(struct work_struct *ignored)
538{
539 int status;
540
541 get_buffer();
542 dell_send_request(buffer, 17, 11);
543 status = buffer->output[1];
544
545 if (wifi_rfkill) {
546 dell_rfkill_update_hw_state(wifi_rfkill, 1, status);
547 dell_rfkill_update_sw_state(wifi_rfkill, 1, status);
548 }
549 if (bluetooth_rfkill) {
550 dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status);
551 dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status);
552 }
553 if (wwan_rfkill) {
554 dell_rfkill_update_hw_state(wwan_rfkill, 3, status);
555 dell_rfkill_update_sw_state(wwan_rfkill, 3, status);
556 }
557
558 release_buffer();
559}
560static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
561
562
563static int __init dell_setup_rfkill(void)
564{
565 int status;
566 int ret;
567 const char *product;
568
569 /*
570 * rfkill causes trouble on various non Latitudes, according to Dell
571 * actually testing the rfkill functionality is only done on Latitudes.
572 */
573 product = dmi_get_system_info(DMI_PRODUCT_NAME);
574 if (!force_rfkill && (!product || strncmp(product, "Latitude", 8)))
575 return 0;
576
577 get_buffer();
578 dell_send_request(buffer, 17, 11);
579 status = buffer->output[1];
580 buffer->input[0] = 0x2;
581 dell_send_request(buffer, 17, 11);
582 hwswitch_state = buffer->output[1];
583 release_buffer();
584
585 if (!(status & BIT(0))) {
586 if (force_rfkill) {
587 /* No hwswitch, clear all hw-controlled bits */
588 hwswitch_state &= ~7;
589 } else {
590 /* rfkill is only tested on laptops with a hwswitch */
591 return 0;
592 }
593 }
594
595 if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
596 wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev,
597 RFKILL_TYPE_WLAN,
598 &dell_rfkill_ops, (void *) 1);
599 if (!wifi_rfkill) {
600 ret = -ENOMEM;
601 goto err_wifi;
602 }
603 ret = rfkill_register(wifi_rfkill);
604 if (ret)
605 goto err_wifi;
606 }
607
608 if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) {
609 bluetooth_rfkill = rfkill_alloc("dell-bluetooth",
610 &platform_device->dev,
611 RFKILL_TYPE_BLUETOOTH,
612 &dell_rfkill_ops, (void *) 2);
613 if (!bluetooth_rfkill) {
614 ret = -ENOMEM;
615 goto err_bluetooth;
616 }
617 ret = rfkill_register(bluetooth_rfkill);
618 if (ret)
619 goto err_bluetooth;
620 }
621
622 if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) {
623 wwan_rfkill = rfkill_alloc("dell-wwan",
624 &platform_device->dev,
625 RFKILL_TYPE_WWAN,
626 &dell_rfkill_ops, (void *) 3);
627 if (!wwan_rfkill) {
628 ret = -ENOMEM;
629 goto err_wwan;
630 }
631 ret = rfkill_register(wwan_rfkill);
632 if (ret)
633 goto err_wwan;
634 }
635
636 return 0;
637err_wwan:
638 rfkill_destroy(wwan_rfkill);
639 if (bluetooth_rfkill)
640 rfkill_unregister(bluetooth_rfkill);
641err_bluetooth:
642 rfkill_destroy(bluetooth_rfkill);
643 if (wifi_rfkill)
644 rfkill_unregister(wifi_rfkill);
645err_wifi:
646 rfkill_destroy(wifi_rfkill);
647
648 return ret;
649}
650
651static void dell_cleanup_rfkill(void)
652{
653 if (wifi_rfkill) {
654 rfkill_unregister(wifi_rfkill);
655 rfkill_destroy(wifi_rfkill);
656 }
657 if (bluetooth_rfkill) {
658 rfkill_unregister(bluetooth_rfkill);
659 rfkill_destroy(bluetooth_rfkill);
660 }
661 if (wwan_rfkill) {
662 rfkill_unregister(wwan_rfkill);
663 rfkill_destroy(wwan_rfkill);
664 }
665}
666
427static int dell_send_intensity(struct backlight_device *bd) 667static int dell_send_intensity(struct backlight_device *bd)
428{ 668{
429 int ret = 0; 669 int ret = 0;
@@ -515,6 +755,30 @@ static void touchpad_led_exit(void)
515 led_classdev_unregister(&touchpad_led); 755 led_classdev_unregister(&touchpad_led);
516} 756}
517 757
758static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
759 struct serio *port)
760{
761 static bool extended;
762
763 if (str & 0x20)
764 return false;
765
766 if (unlikely(data == 0xe0)) {
767 extended = true;
768 return false;
769 } else if (unlikely(extended)) {
770 switch (data) {
771 case 0x8:
772 schedule_delayed_work(&dell_rfkill_work,
773 round_jiffies_relative(HZ / 4));
774 break;
775 }
776 extended = false;
777 }
778
779 return false;
780}
781
518static int __init dell_init(void) 782static int __init dell_init(void)
519{ 783{
520 int max_intensity = 0; 784 int max_intensity = 0;
@@ -557,10 +821,26 @@ static int __init dell_init(void)
557 } 821 }
558 buffer = page_address(bufferpage); 822 buffer = page_address(bufferpage);
559 823
824 ret = dell_setup_rfkill();
825
826 if (ret) {
827 pr_warn("Unable to setup rfkill\n");
828 goto fail_rfkill;
829 }
830
831 ret = i8042_install_filter(dell_laptop_i8042_filter);
832 if (ret) {
833 pr_warn("Unable to install key filter\n");
834 goto fail_filter;
835 }
836
560 if (quirks && quirks->touchpad_led) 837 if (quirks && quirks->touchpad_led)
561 touchpad_led_init(&platform_device->dev); 838 touchpad_led_init(&platform_device->dev);
562 839
563 dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL); 840 dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
841 if (dell_laptop_dir != NULL)
842 debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
843 &dell_debugfs_fops);
564 844
565#ifdef CONFIG_ACPI 845#ifdef CONFIG_ACPI
566 /* In the event of an ACPI backlight being available, don't 846 /* In the event of an ACPI backlight being available, don't
@@ -603,6 +883,11 @@ static int __init dell_init(void)
603 return 0; 883 return 0;
604 884
605fail_backlight: 885fail_backlight:
886 i8042_remove_filter(dell_laptop_i8042_filter);
887 cancel_delayed_work_sync(&dell_rfkill_work);
888fail_filter:
889 dell_cleanup_rfkill();
890fail_rfkill:
606 free_page((unsigned long)bufferpage); 891 free_page((unsigned long)bufferpage);
607fail_buffer: 892fail_buffer:
608 platform_device_del(platform_device); 893 platform_device_del(platform_device);
@@ -620,7 +905,10 @@ static void __exit dell_exit(void)
620 debugfs_remove_recursive(dell_laptop_dir); 905 debugfs_remove_recursive(dell_laptop_dir);
621 if (quirks && quirks->touchpad_led) 906 if (quirks && quirks->touchpad_led)
622 touchpad_led_exit(); 907 touchpad_led_exit();
908 i8042_remove_filter(dell_laptop_i8042_filter);
909 cancel_delayed_work_sync(&dell_rfkill_work);
623 backlight_device_unregister(dell_backlight_device); 910 backlight_device_unregister(dell_backlight_device);
911 dell_cleanup_rfkill();
624 if (platform_device) { 912 if (platform_device) {
625 platform_device_unregister(platform_device); 913 platform_device_unregister(platform_device);
626 platform_driver_unregister(&platform_driver); 914 platform_driver_unregister(&platform_driver);
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index fa9a2171cc13..60e0900bc117 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -130,7 +130,8 @@ static const u16 bios_to_linux_keycode[256] __initconst = {
130 KEY_BRIGHTNESSUP, KEY_UNKNOWN, KEY_KBDILLUMTOGGLE, 130 KEY_BRIGHTNESSUP, KEY_UNKNOWN, KEY_KBDILLUMTOGGLE,
131 KEY_UNKNOWN, KEY_SWITCHVIDEOMODE, KEY_UNKNOWN, KEY_UNKNOWN, 131 KEY_UNKNOWN, KEY_SWITCHVIDEOMODE, KEY_UNKNOWN, KEY_UNKNOWN,
132 KEY_SWITCHVIDEOMODE, KEY_UNKNOWN, KEY_UNKNOWN, KEY_PROG2, 132 KEY_SWITCHVIDEOMODE, KEY_UNKNOWN, KEY_UNKNOWN, KEY_PROG2,
133 KEY_UNKNOWN, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 133 KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
134 KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_MICMUTE,
134 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 135 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
135 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 136 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
136 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 137 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -139,8 +140,8 @@ static const u16 bios_to_linux_keycode[256] __initconst = {
139 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 140 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
140 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 141 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
141 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 142 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
142 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
143 KEY_PROG3 144 0, 0, 0, 0, 0, 0, 0, 0, 0, KEY_PROG3
144}; 145};
145 146
146static struct input_dev *dell_wmi_input_dev; 147static struct input_dev *dell_wmi_input_dev;
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index aefcc32e5634..dec68e7a99c7 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -1203,10 +1203,8 @@ static int eeepc_input_init(struct eeepc_laptop *eeepc)
1203 int error; 1203 int error;
1204 1204
1205 input = input_allocate_device(); 1205 input = input_allocate_device();
1206 if (!input) { 1206 if (!input)
1207 pr_info("Unable to allocate input device\n");
1208 return -ENOMEM; 1207 return -ENOMEM;
1209 }
1210 1208
1211 input->name = "Asus EeePC extra buttons"; 1209 input->name = "Asus EeePC extra buttons";
1212 input->phys = EEEPC_LAPTOP_FILE "/input0"; 1210 input->phys = EEEPC_LAPTOP_FILE "/input0";
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 1c86fa0857c8..8ba8956b5a48 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -54,6 +54,7 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
54#define HPWMI_HARDWARE_QUERY 0x4 54#define HPWMI_HARDWARE_QUERY 0x4
55#define HPWMI_WIRELESS_QUERY 0x5 55#define HPWMI_WIRELESS_QUERY 0x5
56#define HPWMI_HOTKEY_QUERY 0xc 56#define HPWMI_HOTKEY_QUERY 0xc
57#define HPWMI_FEATURE_QUERY 0xd
57#define HPWMI_WIRELESS2_QUERY 0x1b 58#define HPWMI_WIRELESS2_QUERY 0x1b
58#define HPWMI_POSTCODEERROR_QUERY 0x2a 59#define HPWMI_POSTCODEERROR_QUERY 0x2a
59 60
@@ -292,6 +293,17 @@ static int hp_wmi_tablet_state(void)
292 return (state & 0x4) ? 1 : 0; 293 return (state & 0x4) ? 1 : 0;
293} 294}
294 295
296static int hp_wmi_bios_2009_later(void)
297{
298 int state = 0;
299 int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, 0, &state,
300 sizeof(state), sizeof(state));
301 if (ret)
302 return ret;
303
304 return (state & 0x10) ? 1 : 0;
305}
306
295static int hp_wmi_set_block(void *data, bool blocked) 307static int hp_wmi_set_block(void *data, bool blocked)
296{ 308{
297 enum hp_wmi_radio r = (enum hp_wmi_radio) data; 309 enum hp_wmi_radio r = (enum hp_wmi_radio) data;
@@ -871,7 +883,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
871 gps_rfkill = NULL; 883 gps_rfkill = NULL;
872 rfkill2_count = 0; 884 rfkill2_count = 0;
873 885
874 if (hp_wmi_rfkill_setup(device)) 886 if (hp_wmi_bios_2009_later() || hp_wmi_rfkill_setup(device))
875 hp_wmi_rfkill2_setup(device); 887 hp_wmi_rfkill2_setup(device);
876 888
877 err = device_create_file(&device->dev, &dev_attr_display); 889 err = device_create_file(&device->dev, &dev_attr_display);
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 6788acc22ab9..19ec95147f69 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -570,10 +570,8 @@ static int ideapad_input_init(struct ideapad_private *priv)
570 int error; 570 int error;
571 571
572 inputdev = input_allocate_device(); 572 inputdev = input_allocate_device();
573 if (!inputdev) { 573 if (!inputdev)
574 pr_info("Unable to allocate input device\n");
575 return -ENOMEM; 574 return -ENOMEM;
576 }
577 575
578 inputdev->name = "Ideapad extra buttons"; 576 inputdev->name = "Ideapad extra buttons";
579 inputdev->phys = "ideapad/input0"; 577 inputdev->phys = "ideapad/input0";
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 6b18aba82cfa..8d6775266d66 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -66,10 +66,8 @@ static int mfld_pb_probe(struct platform_device *pdev)
66 return -EINVAL; 66 return -EINVAL;
67 67
68 input = input_allocate_device(); 68 input = input_allocate_device();
69 if (!input) { 69 if (!input)
70 dev_err(&pdev->dev, "Input device allocation error\n");
71 return -ENOMEM; 70 return -ENOMEM;
72 }
73 71
74 input->name = pdev->name; 72 input->name = pdev->name;
75 input->phys = "power-button/input0"; 73 input->phys = "power-button/input0";
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index d654f831410d..60ea476a9130 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -58,12 +58,56 @@
58 * message handler is called within firmware. 58 * message handler is called within firmware.
59 */ 59 */
60 60
61#define IPC_BASE_ADDR 0xFF11C000 /* IPC1 base register address */
62#define IPC_MAX_ADDR 0x100 /* Maximum IPC regisers */
63#define IPC_WWBUF_SIZE 20 /* IPC Write buffer Size */ 61#define IPC_WWBUF_SIZE 20 /* IPC Write buffer Size */
64#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */ 62#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */
65#define IPC_I2C_BASE 0xFF12B000 /* I2C control register base address */ 63#define IPC_IOC 0x100 /* IPC command register IOC bit */
66#define IPC_I2C_MAX_ADDR 0x10 /* Maximum I2C regisers */ 64
65enum {
66 SCU_IPC_LINCROFT,
67 SCU_IPC_PENWELL,
68 SCU_IPC_CLOVERVIEW,
69 SCU_IPC_TANGIER,
70};
71
72/* intel scu ipc driver data*/
73struct intel_scu_ipc_pdata_t {
74 u32 ipc_base;
75 u32 i2c_base;
76 u32 ipc_len;
77 u32 i2c_len;
78 u8 irq_mode;
79};
80
81static struct intel_scu_ipc_pdata_t intel_scu_ipc_pdata[] = {
82 [SCU_IPC_LINCROFT] = {
83 .ipc_base = 0xff11c000,
84 .i2c_base = 0xff12b000,
85 .ipc_len = 0x100,
86 .i2c_len = 0x10,
87 .irq_mode = 0,
88 },
89 [SCU_IPC_PENWELL] = {
90 .ipc_base = 0xff11c000,
91 .i2c_base = 0xff12b000,
92 .ipc_len = 0x100,
93 .i2c_len = 0x10,
94 .irq_mode = 1,
95 },
96 [SCU_IPC_CLOVERVIEW] = {
97 .ipc_base = 0xff11c000,
98 .i2c_base = 0xff12b000,
99 .ipc_len = 0x100,
100 .i2c_len = 0x10,
101 .irq_mode = 1,
102 },
103 [SCU_IPC_TANGIER] = {
104 .ipc_base = 0xff009000,
105 .i2c_base = 0xff00d000,
106 .ipc_len = 0x100,
107 .i2c_len = 0x10,
108 .irq_mode = 0,
109 },
110};
67 111
68static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id); 112static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id);
69static void ipc_remove(struct pci_dev *pdev); 113static void ipc_remove(struct pci_dev *pdev);
@@ -72,6 +116,8 @@ struct intel_scu_ipc_dev {
72 struct pci_dev *pdev; 116 struct pci_dev *pdev;
73 void __iomem *ipc_base; 117 void __iomem *ipc_base;
74 void __iomem *i2c_base; 118 void __iomem *i2c_base;
119 struct completion cmd_complete;
120 u8 irq_mode;
75}; 121};
76 122
77static struct intel_scu_ipc_dev ipcdev; /* Only one for now */ 123static struct intel_scu_ipc_dev ipcdev; /* Only one for now */
@@ -98,6 +144,10 @@ static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
98 */ 144 */
99static inline void ipc_command(u32 cmd) /* Send ipc command */ 145static inline void ipc_command(u32 cmd) /* Send ipc command */
100{ 146{
147 if (ipcdev.irq_mode) {
148 reinit_completion(&ipcdev.cmd_complete);
149 writel(cmd | IPC_IOC, ipcdev.ipc_base);
150 }
101 writel(cmd, ipcdev.ipc_base); 151 writel(cmd, ipcdev.ipc_base);
102} 152}
103 153
@@ -156,6 +206,30 @@ static inline int busy_loop(void) /* Wait till scu status is busy */
156 return 0; 206 return 0;
157} 207}
158 208
209/* Wait till ipc ioc interrupt is received or timeout in 3 HZ */
210static inline int ipc_wait_for_interrupt(void)
211{
212 int status;
213
214 if (!wait_for_completion_timeout(&ipcdev.cmd_complete, 3 * HZ)) {
215 struct device *dev = &ipcdev.pdev->dev;
216 dev_err(dev, "IPC timed out\n");
217 return -ETIMEDOUT;
218 }
219
220 status = ipc_read_status();
221
222 if ((status >> 1) & 1)
223 return -EIO;
224
225 return 0;
226}
227
228int intel_scu_ipc_check_status(void)
229{
230 return ipcdev.irq_mode ? ipc_wait_for_interrupt() : busy_loop();
231}
232
159/* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */ 233/* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
160static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id) 234static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
161{ 235{
@@ -196,8 +270,8 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
196 ipc_command(4 << 16 | id << 12 | 0 << 8 | op); 270 ipc_command(4 << 16 | id << 12 | 0 << 8 | op);
197 } 271 }
198 272
199 err = busy_loop(); 273 err = intel_scu_ipc_check_status();
200 if (id == IPC_CMD_PCNTRL_R) { /* Read rbuf */ 274 if (!err && id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
201 /* Workaround: values are read as 0 without memcpy_fromio */ 275 /* Workaround: values are read as 0 without memcpy_fromio */
202 memcpy_fromio(cbuf, ipcdev.ipc_base + 0x90, 16); 276 memcpy_fromio(cbuf, ipcdev.ipc_base + 0x90, 16);
203 for (nc = 0; nc < count; nc++) 277 for (nc = 0; nc < count; nc++)
@@ -391,7 +465,7 @@ int intel_scu_ipc_simple_command(int cmd, int sub)
391 return -ENODEV; 465 return -ENODEV;
392 } 466 }
393 ipc_command(sub << 12 | cmd); 467 ipc_command(sub << 12 | cmd);
394 err = busy_loop(); 468 err = intel_scu_ipc_check_status();
395 mutex_unlock(&ipclock); 469 mutex_unlock(&ipclock);
396 return err; 470 return err;
397} 471}
@@ -425,10 +499,12 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
425 ipc_data_writel(*in++, 4 * i); 499 ipc_data_writel(*in++, 4 * i);
426 500
427 ipc_command((inlen << 16) | (sub << 12) | cmd); 501 ipc_command((inlen << 16) | (sub << 12) | cmd);
428 err = busy_loop(); 502 err = intel_scu_ipc_check_status();
429 503
430 for (i = 0; i < outlen; i++) 504 if (!err) {
431 *out++ = ipc_data_readl(4 * i); 505 for (i = 0; i < outlen; i++)
506 *out++ = ipc_data_readl(4 * i);
507 }
432 508
433 mutex_unlock(&ipclock); 509 mutex_unlock(&ipclock);
434 return err; 510 return err;
@@ -491,6 +567,9 @@ EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
491 */ 567 */
492static irqreturn_t ioc(int irq, void *dev_id) 568static irqreturn_t ioc(int irq, void *dev_id)
493{ 569{
570 if (ipcdev.irq_mode)
571 complete(&ipcdev.cmd_complete);
572
494 return IRQ_HANDLED; 573 return IRQ_HANDLED;
495} 574}
496 575
@@ -504,13 +583,18 @@ static irqreturn_t ioc(int irq, void *dev_id)
504 */ 583 */
505static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id) 584static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
506{ 585{
507 int err; 586 int err, pid;
587 struct intel_scu_ipc_pdata_t *pdata;
508 resource_size_t pci_resource; 588 resource_size_t pci_resource;
509 589
510 if (ipcdev.pdev) /* We support only one SCU */ 590 if (ipcdev.pdev) /* We support only one SCU */
511 return -EBUSY; 591 return -EBUSY;
512 592
593 pid = id->driver_data;
594 pdata = &intel_scu_ipc_pdata[pid];
595
513 ipcdev.pdev = pci_dev_get(dev); 596 ipcdev.pdev = pci_dev_get(dev);
597 ipcdev.irq_mode = pdata->irq_mode;
514 598
515 err = pci_enable_device(dev); 599 err = pci_enable_device(dev);
516 if (err) 600 if (err)
@@ -524,14 +608,16 @@ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
524 if (!pci_resource) 608 if (!pci_resource)
525 return -ENOMEM; 609 return -ENOMEM;
526 610
611 init_completion(&ipcdev.cmd_complete);
612
527 if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev)) 613 if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev))
528 return -EBUSY; 614 return -EBUSY;
529 615
530 ipcdev.ipc_base = ioremap_nocache(IPC_BASE_ADDR, IPC_MAX_ADDR); 616 ipcdev.ipc_base = ioremap_nocache(pdata->ipc_base, pdata->ipc_len);
531 if (!ipcdev.ipc_base) 617 if (!ipcdev.ipc_base)
532 return -ENOMEM; 618 return -ENOMEM;
533 619
534 ipcdev.i2c_base = ioremap_nocache(IPC_I2C_BASE, IPC_I2C_MAX_ADDR); 620 ipcdev.i2c_base = ioremap_nocache(pdata->i2c_base, pdata->i2c_len);
535 if (!ipcdev.i2c_base) { 621 if (!ipcdev.i2c_base) {
536 iounmap(ipcdev.ipc_base); 622 iounmap(ipcdev.ipc_base);
537 return -ENOMEM; 623 return -ENOMEM;
@@ -564,7 +650,10 @@ static void ipc_remove(struct pci_dev *pdev)
564} 650}
565 651
566static DEFINE_PCI_DEVICE_TABLE(pci_ids) = { 652static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
567 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x082a)}, 653 {PCI_VDEVICE(INTEL, 0x082a), SCU_IPC_LINCROFT},
654 {PCI_VDEVICE(INTEL, 0x080e), SCU_IPC_PENWELL},
655 {PCI_VDEVICE(INTEL, 0x08ea), SCU_IPC_CLOVERVIEW},
656 {PCI_VDEVICE(INTEL, 0x11a0), SCU_IPC_TANGIER},
568 { 0,} 657 { 0,}
569}; 658};
570MODULE_DEVICE_TABLE(pci, pci_ids); 659MODULE_DEVICE_TABLE(pci, pci_ids);
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 10d12b221601..3008fd20572e 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -490,11 +490,8 @@ static int acpi_pcc_init_input(struct pcc_acpi *pcc)
490 int error; 490 int error;
491 491
492 input_dev = input_allocate_device(); 492 input_dev = input_allocate_device();
493 if (!input_dev) { 493 if (!input_dev)
494 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
495 "Couldn't allocate input device for hotkey"));
496 return -ENOMEM; 494 return -ENOMEM;
497 }
498 495
499 input_dev->name = ACPI_PCC_DRIVER_NAME; 496 input_dev->name = ACPI_PCC_DRIVER_NAME;
500 input_dev->phys = ACPI_PCC_INPUT_PHYS; 497 input_dev->phys = ACPI_PCC_INPUT_PHYS;
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 47caab0ea7a1..fb233ae7bb0e 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -140,12 +140,12 @@ MODULE_PARM_DESC(kbd_backlight_timeout,
140 "on the model (default: no change from current value)"); 140 "on the model (default: no change from current value)");
141 141
142#ifdef CONFIG_PM_SLEEP 142#ifdef CONFIG_PM_SLEEP
143static void sony_nc_kbd_backlight_resume(void);
144static void sony_nc_thermal_resume(void); 143static void sony_nc_thermal_resume(void);
145#endif 144#endif
146static int sony_nc_kbd_backlight_setup(struct platform_device *pd, 145static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
147 unsigned int handle); 146 unsigned int handle);
148static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd); 147static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd,
148 unsigned int handle);
149 149
150static int sony_nc_battery_care_setup(struct platform_device *pd, 150static int sony_nc_battery_care_setup(struct platform_device *pd,
151 unsigned int handle); 151 unsigned int handle);
@@ -304,8 +304,8 @@ static int sony_laptop_input_keycode_map[] = {
304 KEY_FN_F10, /* 14 SONYPI_EVENT_FNKEY_F10 */ 304 KEY_FN_F10, /* 14 SONYPI_EVENT_FNKEY_F10 */
305 KEY_FN_F11, /* 15 SONYPI_EVENT_FNKEY_F11 */ 305 KEY_FN_F11, /* 15 SONYPI_EVENT_FNKEY_F11 */
306 KEY_FN_F12, /* 16 SONYPI_EVENT_FNKEY_F12 */ 306 KEY_FN_F12, /* 16 SONYPI_EVENT_FNKEY_F12 */
307 KEY_FN_F1, /* 17 SONYPI_EVENT_FNKEY_1 */ 307 KEY_FN_1, /* 17 SONYPI_EVENT_FNKEY_1 */
308 KEY_FN_F2, /* 18 SONYPI_EVENT_FNKEY_2 */ 308 KEY_FN_2, /* 18 SONYPI_EVENT_FNKEY_2 */
309 KEY_FN_D, /* 19 SONYPI_EVENT_FNKEY_D */ 309 KEY_FN_D, /* 19 SONYPI_EVENT_FNKEY_D */
310 KEY_FN_E, /* 20 SONYPI_EVENT_FNKEY_E */ 310 KEY_FN_E, /* 20 SONYPI_EVENT_FNKEY_E */
311 KEY_FN_F, /* 21 SONYPI_EVENT_FNKEY_F */ 311 KEY_FN_F, /* 21 SONYPI_EVENT_FNKEY_F */
@@ -1444,7 +1444,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
1444 case 0x014b: 1444 case 0x014b:
1445 case 0x014c: 1445 case 0x014c:
1446 case 0x0163: 1446 case 0x0163:
1447 sony_nc_kbd_backlight_cleanup(pd); 1447 sony_nc_kbd_backlight_cleanup(pd, handle);
1448 break; 1448 break;
1449 default: 1449 default:
1450 continue; 1450 continue;
@@ -1486,13 +1486,6 @@ static void sony_nc_function_resume(void)
1486 case 0x0135: 1486 case 0x0135:
1487 sony_nc_rfkill_update(); 1487 sony_nc_rfkill_update();
1488 break; 1488 break;
1489 case 0x0137:
1490 case 0x0143:
1491 case 0x014b:
1492 case 0x014c:
1493 case 0x0163:
1494 sony_nc_kbd_backlight_resume();
1495 break;
1496 default: 1489 default:
1497 continue; 1490 continue;
1498 } 1491 }
@@ -1822,6 +1815,12 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
1822 int result; 1815 int result;
1823 int ret = 0; 1816 int ret = 0;
1824 1817
1818 if (kbdbl_ctl) {
1819 pr_warn("handle 0x%.4x: keyboard backlight setup already done for 0x%.4x\n",
1820 handle, kbdbl_ctl->handle);
1821 return -EBUSY;
1822 }
1823
1825 /* verify the kbd backlight presence, these handles are not used for 1824 /* verify the kbd backlight presence, these handles are not used for
1826 * keyboard backlight only 1825 * keyboard backlight only
1827 */ 1826 */
@@ -1881,9 +1880,10 @@ outkzalloc:
1881 return ret; 1880 return ret;
1882} 1881}
1883 1882
1884static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd) 1883static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd,
1884 unsigned int handle)
1885{ 1885{
1886 if (kbdbl_ctl) { 1886 if (kbdbl_ctl && handle == kbdbl_ctl->handle) {
1887 device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr); 1887 device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
1888 device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr); 1888 device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr);
1889 kfree(kbdbl_ctl); 1889 kfree(kbdbl_ctl);
@@ -1891,25 +1891,6 @@ static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
1891 } 1891 }
1892} 1892}
1893 1893
1894#ifdef CONFIG_PM_SLEEP
1895static void sony_nc_kbd_backlight_resume(void)
1896{
1897 int ignore = 0;
1898
1899 if (!kbdbl_ctl)
1900 return;
1901
1902 if (kbdbl_ctl->mode == 0)
1903 sony_call_snc_handle(kbdbl_ctl->handle, kbdbl_ctl->base,
1904 &ignore);
1905
1906 if (kbdbl_ctl->timeout != 0)
1907 sony_call_snc_handle(kbdbl_ctl->handle,
1908 (kbdbl_ctl->base + 0x200) |
1909 (kbdbl_ctl->timeout << 0x10), &ignore);
1910}
1911#endif
1912
1913struct battery_care_control { 1894struct battery_care_control {
1914 struct device_attribute attrs[2]; 1895 struct device_attribute attrs[2];
1915 unsigned int handle; 1896 unsigned int handle;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 05e046aa5e31..58b0274d24cc 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -6438,7 +6438,12 @@ static struct ibm_struct brightness_driver_data = {
6438#define TPACPI_ALSA_SHRTNAME "ThinkPad Console Audio Control" 6438#define TPACPI_ALSA_SHRTNAME "ThinkPad Console Audio Control"
6439#define TPACPI_ALSA_MIXERNAME TPACPI_ALSA_SHRTNAME 6439#define TPACPI_ALSA_MIXERNAME TPACPI_ALSA_SHRTNAME
6440 6440
6441static int alsa_index = ~((1 << (SNDRV_CARDS - 3)) - 1); /* last three slots */ 6441#if SNDRV_CARDS <= 32
6442#define DEFAULT_ALSA_IDX ~((1 << (SNDRV_CARDS - 3)) - 1)
6443#else
6444#define DEFAULT_ALSA_IDX ~((1 << (32 - 3)) - 1)
6445#endif
6446static int alsa_index = DEFAULT_ALSA_IDX; /* last three slots */
6442static char *alsa_id = "ThinkPadEC"; 6447static char *alsa_id = "ThinkPadEC";
6443static bool alsa_enable = SNDRV_DEFAULT_ENABLE1; 6448static bool alsa_enable = SNDRV_DEFAULT_ENABLE1;
6444 6449
@@ -9163,7 +9168,6 @@ static int __init thinkpad_acpi_module_init(void)
9163 mutex_init(&tpacpi_inputdev_send_mutex); 9168 mutex_init(&tpacpi_inputdev_send_mutex);
9164 tpacpi_inputdev = input_allocate_device(); 9169 tpacpi_inputdev = input_allocate_device();
9165 if (!tpacpi_inputdev) { 9170 if (!tpacpi_inputdev) {
9166 pr_err("unable to allocate input device\n");
9167 thinkpad_acpi_module_exit(); 9171 thinkpad_acpi_module_exit();
9168 return -ENOMEM; 9172 return -ENOMEM;
9169 } else { 9173 } else {
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index 67897c8740ba..e597de05e6c2 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -97,10 +97,8 @@ static int acpi_topstar_init_hkey(struct topstar_hkey *hkey)
97 int error; 97 int error;
98 98
99 input = input_allocate_device(); 99 input = input_allocate_device();
100 if (!input) { 100 if (!input)
101 pr_err("Unable to allocate input device\n");
102 return -ENOMEM; 101 return -ENOMEM;
103 }
104 102
105 input->name = "Topstar Laptop extra buttons"; 103 input->name = "Topstar Laptop extra buttons";
106 input->phys = "topstar/input0"; 104 input->phys = "topstar/input0";
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 0cfadb65f7c6..7fce391818d3 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -975,10 +975,8 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
975 u32 hci_result; 975 u32 hci_result;
976 976
977 dev->hotkey_dev = input_allocate_device(); 977 dev->hotkey_dev = input_allocate_device();
978 if (!dev->hotkey_dev) { 978 if (!dev->hotkey_dev)
979 pr_info("Unable to register input device\n");
980 return -ENOMEM; 979 return -ENOMEM;
981 }
982 980
983 dev->hotkey_dev->name = "Toshiba input device"; 981 dev->hotkey_dev->name = "Toshiba input device";
984 dev->hotkey_dev->phys = "toshiba_acpi/input0"; 982 dev->hotkey_dev->phys = "toshiba_acpi/input0";
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 62e8c221d01e..c2e7b2657aeb 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -672,8 +672,10 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
672 struct wmi_block *wblock; 672 struct wmi_block *wblock;
673 673
674 wblock = dev_get_drvdata(dev); 674 wblock = dev_get_drvdata(dev);
675 if (!wblock) 675 if (!wblock) {
676 return -ENOMEM; 676 strcat(buf, "\n");
677 return strlen(buf);
678 }
677 679
678 wmi_gtoa(wblock->gblock.guid, guid_string); 680 wmi_gtoa(wblock->gblock.guid, guid_string);
679 681
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index 6936e0acedcd..f748cc8cbb03 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -197,6 +197,11 @@ static int pnp_bus_freeze(struct device *dev)
197 return __pnp_bus_suspend(dev, PMSG_FREEZE); 197 return __pnp_bus_suspend(dev, PMSG_FREEZE);
198} 198}
199 199
200static int pnp_bus_poweroff(struct device *dev)
201{
202 return __pnp_bus_suspend(dev, PMSG_HIBERNATE);
203}
204
200static int pnp_bus_resume(struct device *dev) 205static int pnp_bus_resume(struct device *dev)
201{ 206{
202 struct pnp_dev *pnp_dev = to_pnp_dev(dev); 207 struct pnp_dev *pnp_dev = to_pnp_dev(dev);
@@ -234,9 +239,14 @@ static int pnp_bus_resume(struct device *dev)
234} 239}
235 240
236static const struct dev_pm_ops pnp_bus_dev_pm_ops = { 241static const struct dev_pm_ops pnp_bus_dev_pm_ops = {
242 /* Suspend callbacks */
237 .suspend = pnp_bus_suspend, 243 .suspend = pnp_bus_suspend,
238 .freeze = pnp_bus_freeze,
239 .resume = pnp_bus_resume, 244 .resume = pnp_bus_resume,
245 /* Hibernate callbacks */
246 .freeze = pnp_bus_freeze,
247 .thaw = pnp_bus_resume,
248 .poweroff = pnp_bus_poweroff,
249 .restore = pnp_bus_resume,
240}; 250};
241 251
242struct bus_type pnp_bus_type = { 252struct bus_type pnp_bus_type = {
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 747826d99059..14655a0f0431 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -89,7 +89,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
89 89
90 pnp_dbg(&dev->dev, "set resources\n"); 90 pnp_dbg(&dev->dev, "set resources\n");
91 91
92 handle = DEVICE_ACPI_HANDLE(&dev->dev); 92 handle = ACPI_HANDLE(&dev->dev);
93 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { 93 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
94 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); 94 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
95 return -ENODEV; 95 return -ENODEV;
@@ -122,7 +122,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
122 122
123 dev_dbg(&dev->dev, "disable resources\n"); 123 dev_dbg(&dev->dev, "disable resources\n");
124 124
125 handle = DEVICE_ACPI_HANDLE(&dev->dev); 125 handle = ACPI_HANDLE(&dev->dev);
126 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { 126 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
127 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); 127 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
128 return 0; 128 return 0;
@@ -144,7 +144,7 @@ static bool pnpacpi_can_wakeup(struct pnp_dev *dev)
144 struct acpi_device *acpi_dev; 144 struct acpi_device *acpi_dev;
145 acpi_handle handle; 145 acpi_handle handle;
146 146
147 handle = DEVICE_ACPI_HANDLE(&dev->dev); 147 handle = ACPI_HANDLE(&dev->dev);
148 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { 148 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
149 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); 149 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
150 return false; 150 return false;
@@ -159,7 +159,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
159 acpi_handle handle; 159 acpi_handle handle;
160 int error = 0; 160 int error = 0;
161 161
162 handle = DEVICE_ACPI_HANDLE(&dev->dev); 162 handle = ACPI_HANDLE(&dev->dev);
163 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { 163 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
164 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); 164 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
165 return 0; 165 return 0;
@@ -194,7 +194,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
194static int pnpacpi_resume(struct pnp_dev *dev) 194static int pnpacpi_resume(struct pnp_dev *dev)
195{ 195{
196 struct acpi_device *acpi_dev; 196 struct acpi_device *acpi_dev;
197 acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); 197 acpi_handle handle = ACPI_HANDLE(&dev->dev);
198 int error = 0; 198 int error = 0;
199 199
200 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { 200 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 8d0fe431dbdd..84419af16f77 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -377,9 +377,14 @@ static void create_power_zone_common_attributes(
377 if (power_zone->ops->get_max_energy_range_uj) 377 if (power_zone->ops->get_max_energy_range_uj)
378 power_zone->zone_dev_attrs[count++] = 378 power_zone->zone_dev_attrs[count++] =
379 &dev_attr_max_energy_range_uj.attr; 379 &dev_attr_max_energy_range_uj.attr;
380 if (power_zone->ops->get_energy_uj) 380 if (power_zone->ops->get_energy_uj) {
381 if (power_zone->ops->reset_energy_uj)
382 dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
383 else
384 dev_attr_energy_uj.attr.mode = S_IRUGO;
381 power_zone->zone_dev_attrs[count++] = 385 power_zone->zone_dev_attrs[count++] =
382 &dev_attr_energy_uj.attr; 386 &dev_attr_energy_uj.attr;
387 }
383 if (power_zone->ops->get_power_uw) 388 if (power_zone->ops->get_power_uw)
384 power_zone->zone_dev_attrs[count++] = 389 power_zone->zone_dev_attrs[count++] =
385 &dev_attr_power_uw.attr; 390 &dev_attr_power_uw.attr;
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
index 724706a97dc4..fd3154d86901 100644
--- a/drivers/regulator/arizona-micsupp.c
+++ b/drivers/regulator/arizona-micsupp.c
@@ -174,6 +174,33 @@ static const struct regulator_desc arizona_micsupp = {
174 .owner = THIS_MODULE, 174 .owner = THIS_MODULE,
175}; 175};
176 176
177static const struct regulator_linear_range arizona_micsupp_ext_ranges[] = {
178 REGULATOR_LINEAR_RANGE(900000, 0, 0x14, 25000),
179 REGULATOR_LINEAR_RANGE(1500000, 0x15, 0x27, 100000),
180};
181
182static const struct regulator_desc arizona_micsupp_ext = {
183 .name = "MICVDD",
184 .supply_name = "CPVDD",
185 .type = REGULATOR_VOLTAGE,
186 .n_voltages = 40,
187 .ops = &arizona_micsupp_ops,
188
189 .vsel_reg = ARIZONA_LDO2_CONTROL_1,
190 .vsel_mask = ARIZONA_LDO2_VSEL_MASK,
191 .enable_reg = ARIZONA_MIC_CHARGE_PUMP_1,
192 .enable_mask = ARIZONA_CPMIC_ENA,
193 .bypass_reg = ARIZONA_MIC_CHARGE_PUMP_1,
194 .bypass_mask = ARIZONA_CPMIC_BYPASS,
195
196 .linear_ranges = arizona_micsupp_ext_ranges,
197 .n_linear_ranges = ARRAY_SIZE(arizona_micsupp_ext_ranges),
198
199 .enable_time = 3000,
200
201 .owner = THIS_MODULE,
202};
203
177static const struct regulator_init_data arizona_micsupp_default = { 204static const struct regulator_init_data arizona_micsupp_default = {
178 .constraints = { 205 .constraints = {
179 .valid_ops_mask = REGULATOR_CHANGE_STATUS | 206 .valid_ops_mask = REGULATOR_CHANGE_STATUS |
@@ -186,9 +213,22 @@ static const struct regulator_init_data arizona_micsupp_default = {
186 .num_consumer_supplies = 1, 213 .num_consumer_supplies = 1,
187}; 214};
188 215
216static const struct regulator_init_data arizona_micsupp_ext_default = {
217 .constraints = {
218 .valid_ops_mask = REGULATOR_CHANGE_STATUS |
219 REGULATOR_CHANGE_VOLTAGE |
220 REGULATOR_CHANGE_BYPASS,
221 .min_uV = 900000,
222 .max_uV = 3300000,
223 },
224
225 .num_consumer_supplies = 1,
226};
227
189static int arizona_micsupp_probe(struct platform_device *pdev) 228static int arizona_micsupp_probe(struct platform_device *pdev)
190{ 229{
191 struct arizona *arizona = dev_get_drvdata(pdev->dev.parent); 230 struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
231 const struct regulator_desc *desc;
192 struct regulator_config config = { }; 232 struct regulator_config config = { };
193 struct arizona_micsupp *micsupp; 233 struct arizona_micsupp *micsupp;
194 int ret; 234 int ret;
@@ -207,7 +247,17 @@ static int arizona_micsupp_probe(struct platform_device *pdev)
207 * default init_data for it. This will be overridden with 247 * default init_data for it. This will be overridden with
208 * platform data if provided. 248 * platform data if provided.
209 */ 249 */
210 micsupp->init_data = arizona_micsupp_default; 250 switch (arizona->type) {
251 case WM5110:
252 desc = &arizona_micsupp_ext;
253 micsupp->init_data = arizona_micsupp_ext_default;
254 break;
255 default:
256 desc = &arizona_micsupp;
257 micsupp->init_data = arizona_micsupp_default;
258 break;
259 }
260
211 micsupp->init_data.consumer_supplies = &micsupp->supply; 261 micsupp->init_data.consumer_supplies = &micsupp->supply;
212 micsupp->supply.supply = "MICVDD"; 262 micsupp->supply.supply = "MICVDD";
213 micsupp->supply.dev_name = dev_name(arizona->dev); 263 micsupp->supply.dev_name = dev_name(arizona->dev);
@@ -226,7 +276,7 @@ static int arizona_micsupp_probe(struct platform_device *pdev)
226 ARIZONA_CPMIC_BYPASS, 0); 276 ARIZONA_CPMIC_BYPASS, 0);
227 277
228 micsupp->regulator = devm_regulator_register(&pdev->dev, 278 micsupp->regulator = devm_regulator_register(&pdev->dev,
229 &arizona_micsupp, 279 desc,
230 &config); 280 &config);
231 if (IS_ERR(micsupp->regulator)) { 281 if (IS_ERR(micsupp->regulator)) {
232 ret = PTR_ERR(micsupp->regulator); 282 ret = PTR_ERR(micsupp->regulator);
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
index 5917fe3dc983..b9f1d24c6812 100644
--- a/drivers/regulator/as3722-regulator.c
+++ b/drivers/regulator/as3722-regulator.c
@@ -590,8 +590,8 @@ static int as3722_sd016_set_current_limit(struct regulator_dev *rdev,
590 default: 590 default:
591 return -EINVAL; 591 return -EINVAL;
592 } 592 }
593 ret <<= ffs(mask) - 1;
593 val = ret & mask; 594 val = ret & mask;
594 val <<= ffs(mask) - 1;
595 return as3722_update_bits(as3722, reg, mask, val); 595 return as3722_update_bits(as3722, reg, mask, val);
596} 596}
597 597
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 6382f0af353b..d85f31385b24 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -119,6 +119,11 @@ static const char *rdev_get_name(struct regulator_dev *rdev)
119 return ""; 119 return "";
120} 120}
121 121
122static bool have_full_constraints(void)
123{
124 return has_full_constraints || of_have_populated_dt();
125}
126
122/** 127/**
123 * of_get_regulator - get a regulator device node based on supply name 128 * of_get_regulator - get a regulator device node based on supply name
124 * @dev: Device pointer for the consumer (of regulator) device 129 * @dev: Device pointer for the consumer (of regulator) device
@@ -1340,7 +1345,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
1340 * Assume that a regulator is physically present and enabled 1345 * Assume that a regulator is physically present and enabled
1341 * even if it isn't hooked up and just provide a dummy. 1346 * even if it isn't hooked up and just provide a dummy.
1342 */ 1347 */
1343 if (has_full_constraints && allow_dummy) { 1348 if (have_full_constraints() && allow_dummy) {
1344 pr_warn("%s supply %s not found, using dummy regulator\n", 1349 pr_warn("%s supply %s not found, using dummy regulator\n",
1345 devname, id); 1350 devname, id);
1346 1351
@@ -2184,6 +2189,9 @@ int regulator_list_voltage(struct regulator *regulator, unsigned selector)
2184 struct regulator_ops *ops = rdev->desc->ops; 2189 struct regulator_ops *ops = rdev->desc->ops;
2185 int ret; 2190 int ret;
2186 2191
2192 if (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1 && !selector)
2193 return rdev->desc->fixed_uV;
2194
2187 if (!ops->list_voltage || selector >= rdev->desc->n_voltages) 2195 if (!ops->list_voltage || selector >= rdev->desc->n_voltages)
2188 return -EINVAL; 2196 return -EINVAL;
2189 2197
@@ -3624,7 +3632,7 @@ int regulator_suspend_finish(void)
3624 if (error) 3632 if (error)
3625 ret = error; 3633 ret = error;
3626 } else { 3634 } else {
3627 if (!has_full_constraints) 3635 if (!have_full_constraints())
3628 goto unlock; 3636 goto unlock;
3629 if (!ops->disable) 3637 if (!ops->disable)
3630 goto unlock; 3638 goto unlock;
@@ -3822,7 +3830,7 @@ static int __init regulator_init_complete(void)
3822 if (!enabled) 3830 if (!enabled)
3823 goto unlock; 3831 goto unlock;
3824 3832
3825 if (has_full_constraints) { 3833 if (have_full_constraints()) {
3826 /* We log since this may kill the system if it 3834 /* We log since this may kill the system if it
3827 * goes wrong. */ 3835 * goes wrong. */
3828 rdev_info(rdev, "disabling\n"); 3836 rdev_info(rdev, "disabling\n");
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 04406a918c04..234960dc9607 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -139,6 +139,7 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
139 struct property *prop; 139 struct property *prop;
140 const char *regtype; 140 const char *regtype;
141 int proplen, gpio, i; 141 int proplen, gpio, i;
142 int ret;
142 143
143 config = devm_kzalloc(dev, 144 config = devm_kzalloc(dev,
144 sizeof(struct gpio_regulator_config), 145 sizeof(struct gpio_regulator_config),
@@ -202,7 +203,11 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
202 } 203 }
203 config->nr_states = i; 204 config->nr_states = i;
204 205
205 of_property_read_string(np, "regulator-type", &regtype); 206 ret = of_property_read_string(np, "regulator-type", &regtype);
207 if (ret < 0) {
208 dev_err(dev, "Missing 'regulator-type' property\n");
209 return ERR_PTR(-EINVAL);
210 }
206 211
207 if (!strncmp("voltage", regtype, 7)) 212 if (!strncmp("voltage", regtype, 7))
208 config->type = REGULATOR_VOLTAGE; 213 config->type = REGULATOR_VOLTAGE;
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index ba67b2c4e2e7..8b5e4c712a01 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -38,7 +38,7 @@
38 38
39#define PFUZE100_DEVICEID 0x0 39#define PFUZE100_DEVICEID 0x0
40#define PFUZE100_REVID 0x3 40#define PFUZE100_REVID 0x3
41#define PFUZE100_FABID 0x3 41#define PFUZE100_FABID 0x4
42 42
43#define PFUZE100_SW1ABVOL 0x20 43#define PFUZE100_SW1ABVOL 0x20
44#define PFUZE100_SW1CVOL 0x2e 44#define PFUZE100_SW1CVOL 0x2e
@@ -308,9 +308,15 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip)
308 if (ret) 308 if (ret)
309 return ret; 309 return ret;
310 310
311 if (value & 0x0f) { 311 switch (value & 0x0f) {
312 dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value); 312 /* Freescale misprogrammed 1-3% of parts prior to week 8 of 2013 as ID=8 */
313 return -ENODEV; 313 case 0x8:
314 dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8");
315 case 0x0:
316 break;
317 default:
318 dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
319 return -ENODEV;
314 } 320 }
315 321
316 ret = regmap_read(pfuze_chip->regmap, PFUZE100_REVID, &value); 322 ret = regmap_read(pfuze_chip->regmap, PFUZE100_REVID, &value);
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index cbf91e25cf7f..aeb40aad0ae7 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -925,7 +925,7 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
925 config.dev = s5m8767->dev; 925 config.dev = s5m8767->dev;
926 config.init_data = pdata->regulators[i].initdata; 926 config.init_data = pdata->regulators[i].initdata;
927 config.driver_data = s5m8767; 927 config.driver_data = s5m8767;
928 config.regmap = iodev->regmap; 928 config.regmap = iodev->regmap_pmic;
929 config.of_node = pdata->regulators[i].reg_node; 929 config.of_node = pdata->regulators[i].reg_node;
930 930
931 rdev[i] = devm_regulator_register(&pdev->dev, &regulators[id], 931 rdev[i] = devm_regulator_register(&pdev->dev, &regulators[id],
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 15f166a470a7..007730222116 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -626,7 +626,7 @@ comment "Platform RTC drivers"
626 626
627config RTC_DRV_CMOS 627config RTC_DRV_CMOS
628 tristate "PC-style 'CMOS'" 628 tristate "PC-style 'CMOS'"
629 depends on X86 || ALPHA || ARM || M32R || ATARI || PPC || MIPS || SPARC64 629 depends on X86 || ARM || M32R || ATARI || PPC || MIPS || SPARC64
630 default y if X86 630 default y if X86
631 help 631 help
632 Say "yes" here to get direct support for the real time clock 632 Say "yes" here to get direct support for the real time clock
@@ -643,6 +643,14 @@ config RTC_DRV_CMOS
643 This driver can also be built as a module. If so, the module 643 This driver can also be built as a module. If so, the module
644 will be called rtc-cmos. 644 will be called rtc-cmos.
645 645
646config RTC_DRV_ALPHA
647 bool "Alpha PC-style CMOS"
648 depends on ALPHA
649 default y
650 help
651 Direct support for the real-time clock found on every Alpha
652 system, specifically MC146818 compatibles. If in doubt, say Y.
653
646config RTC_DRV_VRTC 654config RTC_DRV_VRTC
647 tristate "Virtual RTC for Intel MID platforms" 655 tristate "Virtual RTC for Intel MID platforms"
648 depends on X86_INTEL_MID 656 depends on X86_INTEL_MID
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 8b2cd8a5a2ff..3281c90691c3 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -220,6 +220,8 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
220 220
221 at91_alarm_year = tm.tm_year; 221 at91_alarm_year = tm.tm_year;
222 222
223 tm.tm_mon = alrm->time.tm_mon;
224 tm.tm_mday = alrm->time.tm_mday;
223 tm.tm_hour = alrm->time.tm_hour; 225 tm.tm_hour = alrm->time.tm_hour;
224 tm.tm_min = alrm->time.tm_min; 226 tm.tm_min = alrm->time.tm_min;
225 tm.tm_sec = alrm->time.tm_sec; 227 tm.tm_sec = alrm->time.tm_sec;
@@ -428,6 +430,14 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
428 return 0; 430 return 0;
429} 431}
430 432
433static void at91_rtc_shutdown(struct platform_device *pdev)
434{
435 /* Disable all interrupts */
436 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
437 AT91_RTC_SECEV | AT91_RTC_TIMEV |
438 AT91_RTC_CALEV);
439}
440
431#ifdef CONFIG_PM_SLEEP 441#ifdef CONFIG_PM_SLEEP
432 442
433/* AT91RM9200 RTC Power management control */ 443/* AT91RM9200 RTC Power management control */
@@ -466,6 +476,7 @@ static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume);
466 476
467static struct platform_driver at91_rtc_driver = { 477static struct platform_driver at91_rtc_driver = {
468 .remove = __exit_p(at91_rtc_remove), 478 .remove = __exit_p(at91_rtc_remove),
479 .shutdown = at91_rtc_shutdown,
469 .driver = { 480 .driver = {
470 .name = "at91_rtc", 481 .name = "at91_rtc",
471 .owner = THIS_MODULE, 482 .owner = THIS_MODULE,
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index b7fd02bc0a14..ae8119dc2846 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -28,10 +28,20 @@
28#include <linux/mfd/samsung/irq.h> 28#include <linux/mfd/samsung/irq.h>
29#include <linux/mfd/samsung/rtc.h> 29#include <linux/mfd/samsung/rtc.h>
30 30
31/*
32 * Maximum number of retries for checking changes in UDR field
33 * of SEC_RTC_UDR_CON register (to limit possible endless loop).
34 *
35 * After writing to RTC registers (setting time or alarm) read the UDR field
36 * in SEC_RTC_UDR_CON register. UDR is auto-cleared when data have
37 * been transferred.
38 */
39#define UDR_READ_RETRY_CNT 5
40
31struct s5m_rtc_info { 41struct s5m_rtc_info {
32 struct device *dev; 42 struct device *dev;
33 struct sec_pmic_dev *s5m87xx; 43 struct sec_pmic_dev *s5m87xx;
34 struct regmap *rtc; 44 struct regmap *regmap;
35 struct rtc_device *rtc_dev; 45 struct rtc_device *rtc_dev;
36 int irq; 46 int irq;
37 int device_type; 47 int device_type;
@@ -84,12 +94,31 @@ static int s5m8767_tm_to_data(struct rtc_time *tm, u8 *data)
84 } 94 }
85} 95}
86 96
97/*
98 * Read RTC_UDR_CON register and wait till UDR field is cleared.
99 * This indicates that time/alarm update ended.
100 */
101static inline int s5m8767_wait_for_udr_update(struct s5m_rtc_info *info)
102{
103 int ret, retry = UDR_READ_RETRY_CNT;
104 unsigned int data;
105
106 do {
107 ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data);
108 } while (--retry && (data & RTC_UDR_MASK) && !ret);
109
110 if (!retry)
111 dev_err(info->dev, "waiting for UDR update, reached max number of retries\n");
112
113 return ret;
114}
115
87static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info) 116static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info)
88{ 117{
89 int ret; 118 int ret;
90 unsigned int data; 119 unsigned int data;
91 120
92 ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data); 121 ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data);
93 if (ret < 0) { 122 if (ret < 0) {
94 dev_err(info->dev, "failed to read update reg(%d)\n", ret); 123 dev_err(info->dev, "failed to read update reg(%d)\n", ret);
95 return ret; 124 return ret;
@@ -98,15 +127,13 @@ static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info)
98 data |= RTC_TIME_EN_MASK; 127 data |= RTC_TIME_EN_MASK;
99 data |= RTC_UDR_MASK; 128 data |= RTC_UDR_MASK;
100 129
101 ret = regmap_write(info->rtc, SEC_RTC_UDR_CON, data); 130 ret = regmap_write(info->regmap, SEC_RTC_UDR_CON, data);
102 if (ret < 0) { 131 if (ret < 0) {
103 dev_err(info->dev, "failed to write update reg(%d)\n", ret); 132 dev_err(info->dev, "failed to write update reg(%d)\n", ret);
104 return ret; 133 return ret;
105 } 134 }
106 135
107 do { 136 ret = s5m8767_wait_for_udr_update(info);
108 ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
109 } while ((data & RTC_UDR_MASK) && !ret);
110 137
111 return ret; 138 return ret;
112} 139}
@@ -116,7 +143,7 @@ static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
116 int ret; 143 int ret;
117 unsigned int data; 144 unsigned int data;
118 145
119 ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data); 146 ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data);
120 if (ret < 0) { 147 if (ret < 0) {
121 dev_err(info->dev, "%s: fail to read update reg(%d)\n", 148 dev_err(info->dev, "%s: fail to read update reg(%d)\n",
122 __func__, ret); 149 __func__, ret);
@@ -126,16 +153,14 @@ static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
126 data &= ~RTC_TIME_EN_MASK; 153 data &= ~RTC_TIME_EN_MASK;
127 data |= RTC_UDR_MASK; 154 data |= RTC_UDR_MASK;
128 155
129 ret = regmap_write(info->rtc, SEC_RTC_UDR_CON, data); 156 ret = regmap_write(info->regmap, SEC_RTC_UDR_CON, data);
130 if (ret < 0) { 157 if (ret < 0) {
131 dev_err(info->dev, "%s: fail to write update reg(%d)\n", 158 dev_err(info->dev, "%s: fail to write update reg(%d)\n",
132 __func__, ret); 159 __func__, ret);
133 return ret; 160 return ret;
134 } 161 }
135 162
136 do { 163 ret = s5m8767_wait_for_udr_update(info);
137 ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
138 } while ((data & RTC_UDR_MASK) && !ret);
139 164
140 return ret; 165 return ret;
141} 166}
@@ -178,7 +203,7 @@ static int s5m_rtc_read_time(struct device *dev, struct rtc_time *tm)
178 u8 data[8]; 203 u8 data[8];
179 int ret; 204 int ret;
180 205
181 ret = regmap_bulk_read(info->rtc, SEC_RTC_SEC, data, 8); 206 ret = regmap_bulk_read(info->regmap, SEC_RTC_SEC, data, 8);
182 if (ret < 0) 207 if (ret < 0)
183 return ret; 208 return ret;
184 209
@@ -226,7 +251,7 @@ static int s5m_rtc_set_time(struct device *dev, struct rtc_time *tm)
226 1900 + tm->tm_year, 1 + tm->tm_mon, tm->tm_mday, 251 1900 + tm->tm_year, 1 + tm->tm_mon, tm->tm_mday,
227 tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday); 252 tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday);
228 253
229 ret = regmap_raw_write(info->rtc, SEC_RTC_SEC, data, 8); 254 ret = regmap_raw_write(info->regmap, SEC_RTC_SEC, data, 8);
230 if (ret < 0) 255 if (ret < 0)
231 return ret; 256 return ret;
232 257
@@ -242,20 +267,20 @@ static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
242 unsigned int val; 267 unsigned int val;
243 int ret, i; 268 int ret, i;
244 269
245 ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8); 270 ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8);
246 if (ret < 0) 271 if (ret < 0)
247 return ret; 272 return ret;
248 273
249 switch (info->device_type) { 274 switch (info->device_type) {
250 case S5M8763X: 275 case S5M8763X:
251 s5m8763_data_to_tm(data, &alrm->time); 276 s5m8763_data_to_tm(data, &alrm->time);
252 ret = regmap_read(info->rtc, SEC_ALARM0_CONF, &val); 277 ret = regmap_read(info->regmap, SEC_ALARM0_CONF, &val);
253 if (ret < 0) 278 if (ret < 0)
254 return ret; 279 return ret;
255 280
256 alrm->enabled = !!val; 281 alrm->enabled = !!val;
257 282
258 ret = regmap_read(info->rtc, SEC_RTC_STATUS, &val); 283 ret = regmap_read(info->regmap, SEC_RTC_STATUS, &val);
259 if (ret < 0) 284 if (ret < 0)
260 return ret; 285 return ret;
261 286
@@ -278,7 +303,7 @@ static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
278 } 303 }
279 304
280 alrm->pending = 0; 305 alrm->pending = 0;
281 ret = regmap_read(info->rtc, SEC_RTC_STATUS, &val); 306 ret = regmap_read(info->regmap, SEC_RTC_STATUS, &val);
282 if (ret < 0) 307 if (ret < 0)
283 return ret; 308 return ret;
284 break; 309 break;
@@ -301,7 +326,7 @@ static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info)
301 int ret, i; 326 int ret, i;
302 struct rtc_time tm; 327 struct rtc_time tm;
303 328
304 ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8); 329 ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8);
305 if (ret < 0) 330 if (ret < 0)
306 return ret; 331 return ret;
307 332
@@ -312,14 +337,14 @@ static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info)
312 337
313 switch (info->device_type) { 338 switch (info->device_type) {
314 case S5M8763X: 339 case S5M8763X:
315 ret = regmap_write(info->rtc, SEC_ALARM0_CONF, 0); 340 ret = regmap_write(info->regmap, SEC_ALARM0_CONF, 0);
316 break; 341 break;
317 342
318 case S5M8767X: 343 case S5M8767X:
319 for (i = 0; i < 7; i++) 344 for (i = 0; i < 7; i++)
320 data[i] &= ~ALARM_ENABLE_MASK; 345 data[i] &= ~ALARM_ENABLE_MASK;
321 346
322 ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8); 347 ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8);
323 if (ret < 0) 348 if (ret < 0)
324 return ret; 349 return ret;
325 350
@@ -341,7 +366,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
341 u8 alarm0_conf; 366 u8 alarm0_conf;
342 struct rtc_time tm; 367 struct rtc_time tm;
343 368
344 ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8); 369 ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8);
345 if (ret < 0) 370 if (ret < 0)
346 return ret; 371 return ret;
347 372
@@ -353,7 +378,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
353 switch (info->device_type) { 378 switch (info->device_type) {
354 case S5M8763X: 379 case S5M8763X:
355 alarm0_conf = 0x77; 380 alarm0_conf = 0x77;
356 ret = regmap_write(info->rtc, SEC_ALARM0_CONF, alarm0_conf); 381 ret = regmap_write(info->regmap, SEC_ALARM0_CONF, alarm0_conf);
357 break; 382 break;
358 383
359 case S5M8767X: 384 case S5M8767X:
@@ -368,7 +393,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
368 if (data[RTC_YEAR1] & 0x7f) 393 if (data[RTC_YEAR1] & 0x7f)
369 data[RTC_YEAR1] |= ALARM_ENABLE_MASK; 394 data[RTC_YEAR1] |= ALARM_ENABLE_MASK;
370 395
371 ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8); 396 ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8);
372 if (ret < 0) 397 if (ret < 0)
373 return ret; 398 return ret;
374 ret = s5m8767_rtc_set_alarm_reg(info); 399 ret = s5m8767_rtc_set_alarm_reg(info);
@@ -410,7 +435,7 @@ static int s5m_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
410 if (ret < 0) 435 if (ret < 0)
411 return ret; 436 return ret;
412 437
413 ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8); 438 ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8);
414 if (ret < 0) 439 if (ret < 0)
415 return ret; 440 return ret;
416 441
@@ -455,7 +480,7 @@ static const struct rtc_class_ops s5m_rtc_ops = {
455static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable) 480static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable)
456{ 481{
457 int ret; 482 int ret;
458 ret = regmap_update_bits(info->rtc, SEC_WTSR_SMPL_CNTL, 483 ret = regmap_update_bits(info->regmap, SEC_WTSR_SMPL_CNTL,
459 WTSR_ENABLE_MASK, 484 WTSR_ENABLE_MASK,
460 enable ? WTSR_ENABLE_MASK : 0); 485 enable ? WTSR_ENABLE_MASK : 0);
461 if (ret < 0) 486 if (ret < 0)
@@ -466,7 +491,7 @@ static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable)
466static void s5m_rtc_enable_smpl(struct s5m_rtc_info *info, bool enable) 491static void s5m_rtc_enable_smpl(struct s5m_rtc_info *info, bool enable)
467{ 492{
468 int ret; 493 int ret;
469 ret = regmap_update_bits(info->rtc, SEC_WTSR_SMPL_CNTL, 494 ret = regmap_update_bits(info->regmap, SEC_WTSR_SMPL_CNTL,
470 SMPL_ENABLE_MASK, 495 SMPL_ENABLE_MASK,
471 enable ? SMPL_ENABLE_MASK : 0); 496 enable ? SMPL_ENABLE_MASK : 0);
472 if (ret < 0) 497 if (ret < 0)
@@ -481,7 +506,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
481 int ret; 506 int ret;
482 struct rtc_time tm; 507 struct rtc_time tm;
483 508
484 ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &tp_read); 509 ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &tp_read);
485 if (ret < 0) { 510 if (ret < 0) {
486 dev_err(info->dev, "%s: fail to read control reg(%d)\n", 511 dev_err(info->dev, "%s: fail to read control reg(%d)\n",
487 __func__, ret); 512 __func__, ret);
@@ -493,7 +518,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
493 data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT); 518 data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
494 519
495 info->rtc_24hr_mode = 1; 520 info->rtc_24hr_mode = 1;
496 ret = regmap_raw_write(info->rtc, SEC_ALARM0_CONF, data, 2); 521 ret = regmap_raw_write(info->regmap, SEC_ALARM0_CONF, data, 2);
497 if (ret < 0) { 522 if (ret < 0) {
498 dev_err(info->dev, "%s: fail to write controlm reg(%d)\n", 523 dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
499 __func__, ret); 524 __func__, ret);
@@ -515,7 +540,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
515 ret = s5m_rtc_set_time(info->dev, &tm); 540 ret = s5m_rtc_set_time(info->dev, &tm);
516 } 541 }
517 542
518 ret = regmap_update_bits(info->rtc, SEC_RTC_UDR_CON, 543 ret = regmap_update_bits(info->regmap, SEC_RTC_UDR_CON,
519 RTC_TCON_MASK, tp_read | RTC_TCON_MASK); 544 RTC_TCON_MASK, tp_read | RTC_TCON_MASK);
520 if (ret < 0) 545 if (ret < 0)
521 dev_err(info->dev, "%s: fail to update TCON reg(%d)\n", 546 dev_err(info->dev, "%s: fail to update TCON reg(%d)\n",
@@ -542,17 +567,19 @@ static int s5m_rtc_probe(struct platform_device *pdev)
542 567
543 info->dev = &pdev->dev; 568 info->dev = &pdev->dev;
544 info->s5m87xx = s5m87xx; 569 info->s5m87xx = s5m87xx;
545 info->rtc = s5m87xx->rtc; 570 info->regmap = s5m87xx->regmap_rtc;
546 info->device_type = s5m87xx->device_type; 571 info->device_type = s5m87xx->device_type;
547 info->wtsr_smpl = s5m87xx->wtsr_smpl; 572 info->wtsr_smpl = s5m87xx->wtsr_smpl;
548 573
549 switch (pdata->device_type) { 574 switch (pdata->device_type) {
550 case S5M8763X: 575 case S5M8763X:
551 info->irq = s5m87xx->irq_base + S5M8763_IRQ_ALARM0; 576 info->irq = regmap_irq_get_virq(s5m87xx->irq_data,
577 S5M8763_IRQ_ALARM0);
552 break; 578 break;
553 579
554 case S5M8767X: 580 case S5M8767X:
555 info->irq = s5m87xx->irq_base + S5M8767_IRQ_RTCA1; 581 info->irq = regmap_irq_get_virq(s5m87xx->irq_data,
582 S5M8767_IRQ_RTCA1);
556 break; 583 break;
557 584
558 default: 585 default:
@@ -596,7 +623,7 @@ static void s5m_rtc_shutdown(struct platform_device *pdev)
596 if (info->wtsr_smpl) { 623 if (info->wtsr_smpl) {
597 for (i = 0; i < 3; i++) { 624 for (i = 0; i < 3; i++) {
598 s5m_rtc_enable_wtsr(info, false); 625 s5m_rtc_enable_wtsr(info, false);
599 regmap_read(info->rtc, SEC_WTSR_SMPL_CNTL, &val); 626 regmap_read(info->regmap, SEC_WTSR_SMPL_CNTL, &val);
600 pr_debug("%s: WTSR_SMPL reg(0x%02x)\n", __func__, val); 627 pr_debug("%s: WTSR_SMPL reg(0x%02x)\n", __func__, val);
601 if (val & WTSR_ENABLE_MASK) 628 if (val & WTSR_ENABLE_MASK)
602 pr_emerg("%s: fail to disable WTSR\n", 629 pr_emerg("%s: fail to disable WTSR\n",
@@ -612,6 +639,30 @@ static void s5m_rtc_shutdown(struct platform_device *pdev)
612 s5m_rtc_enable_smpl(info, false); 639 s5m_rtc_enable_smpl(info, false);
613} 640}
614 641
642static int s5m_rtc_resume(struct device *dev)
643{
644 struct s5m_rtc_info *info = dev_get_drvdata(dev);
645 int ret = 0;
646
647 if (device_may_wakeup(dev))
648 ret = disable_irq_wake(info->irq);
649
650 return ret;
651}
652
653static int s5m_rtc_suspend(struct device *dev)
654{
655 struct s5m_rtc_info *info = dev_get_drvdata(dev);
656 int ret = 0;
657
658 if (device_may_wakeup(dev))
659 ret = enable_irq_wake(info->irq);
660
661 return ret;
662}
663
664static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
665
615static const struct platform_device_id s5m_rtc_id[] = { 666static const struct platform_device_id s5m_rtc_id[] = {
616 { "s5m-rtc", 0 }, 667 { "s5m-rtc", 0 },
617}; 668};
@@ -620,6 +671,7 @@ static struct platform_driver s5m_rtc_driver = {
620 .driver = { 671 .driver = {
621 .name = "s5m-rtc", 672 .name = "s5m-rtc",
622 .owner = THIS_MODULE, 673 .owner = THIS_MODULE,
674 .pm = &s5m_rtc_pm_ops,
623 }, 675 },
624 .probe = s5m_rtc_probe, 676 .probe = s5m_rtc_probe,
625 .shutdown = s5m_rtc_shutdown, 677 .shutdown = s5m_rtc_shutdown,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index cee7e2708a1f..95e45782692f 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3224,6 +3224,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
3224 3224
3225 fcx_multitrack = private->features.feature[40] & 0x20; 3225 fcx_multitrack = private->features.feature[40] & 0x20;
3226 data_size = blk_rq_bytes(req); 3226 data_size = blk_rq_bytes(req);
3227 if (data_size % blksize)
3228 return ERR_PTR(-EINVAL);
3227 /* tpm write request add CBC data on each track boundary */ 3229 /* tpm write request add CBC data on each track boundary */
3228 if (rq_data_dir(req) == WRITE) 3230 if (rq_data_dir(req) == WRITE)
3229 data_size += (last_trk - first_trk) * 4; 3231 data_size += (last_trk - first_trk) * 4;
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index f64921756ad6..f224d59c4b6b 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -87,7 +87,6 @@ void dasd_gendisk_free(struct dasd_block *block)
87{ 87{
88 if (block->gdp) { 88 if (block->gdp) {
89 del_gendisk(block->gdp); 89 del_gendisk(block->gdp);
90 block->gdp->queue = NULL;
91 block->gdp->private_data = NULL; 90 block->gdp->private_data = NULL;
92 put_disk(block->gdp); 91 put_disk(block->gdp);
93 block->gdp = NULL; 92 block->gdp = NULL;
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 548209a9c43c..d0ab5019d885 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -118,22 +118,6 @@ static void scm_request_done(struct scm_request *scmrq)
118 spin_unlock_irqrestore(&list_lock, flags); 118 spin_unlock_irqrestore(&list_lock, flags);
119} 119}
120 120
121static int scm_open(struct block_device *blkdev, fmode_t mode)
122{
123 return scm_get_ref();
124}
125
126static void scm_release(struct gendisk *gendisk, fmode_t mode)
127{
128 scm_put_ref();
129}
130
131static const struct block_device_operations scm_blk_devops = {
132 .owner = THIS_MODULE,
133 .open = scm_open,
134 .release = scm_release,
135};
136
137static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req) 121static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
138{ 122{
139 return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT; 123 return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
@@ -256,7 +240,7 @@ static void scm_blk_request(struct request_queue *rq)
256 atomic_inc(&bdev->queued_reqs); 240 atomic_inc(&bdev->queued_reqs);
257 blk_start_request(req); 241 blk_start_request(req);
258 242
259 ret = scm_start_aob(scmrq->aob); 243 ret = eadm_start_aob(scmrq->aob);
260 if (ret) { 244 if (ret) {
261 SCM_LOG(5, "no subchannel"); 245 SCM_LOG(5, "no subchannel");
262 scm_request_requeue(scmrq); 246 scm_request_requeue(scmrq);
@@ -320,7 +304,7 @@ static void scm_blk_handle_error(struct scm_request *scmrq)
320 } 304 }
321 305
322restart: 306restart:
323 if (!scm_start_aob(scmrq->aob)) 307 if (!eadm_start_aob(scmrq->aob))
324 return; 308 return;
325 309
326requeue: 310requeue:
@@ -363,6 +347,10 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev)
363 blk_run_queue(bdev->rq); 347 blk_run_queue(bdev->rq);
364} 348}
365 349
350static const struct block_device_operations scm_blk_devops = {
351 .owner = THIS_MODULE,
352};
353
366int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) 354int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
367{ 355{
368 struct request_queue *rq; 356 struct request_queue *rq;
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index c0d102e3a48b..27f930cd657f 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -187,7 +187,7 @@ bool scm_need_cluster_request(struct scm_request *scmrq)
187void scm_initiate_cluster_request(struct scm_request *scmrq) 187void scm_initiate_cluster_request(struct scm_request *scmrq)
188{ 188{
189 scm_prepare_cluster_request(scmrq); 189 scm_prepare_cluster_request(scmrq);
190 if (scm_start_aob(scmrq->aob)) 190 if (eadm_start_aob(scmrq->aob))
191 scm_request_requeue(scmrq); 191 scm_request_requeue(scmrq);
192} 192}
193 193
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 17821a026c9c..b69ab17f13fa 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -3,7 +3,8 @@
3# 3#
4 4
5obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ 5obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
6 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o 6 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
7 sclp_early.o
7 8
8obj-$(CONFIG_TN3270) += raw3270.o 9obj-$(CONFIG_TN3270) += raw3270.o
9obj-$(CONFIG_TN3270_CONSOLE) += con3270.o 10obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index f93cc32eb818..71e974738014 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -564,6 +564,7 @@ static void __exit
564fs3270_exit(void) 564fs3270_exit(void)
565{ 565{
566 raw3270_unregister_notifier(&fs3270_notifier); 566 raw3270_unregister_notifier(&fs3270_notifier);
567 device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0));
567 __unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270"); 568 __unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270");
568} 569}
569 570
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 40d1406289ed..6fbe09686d18 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -99,6 +99,7 @@ struct init_sccb {
99} __attribute__((packed)); 99} __attribute__((packed));
100 100
101extern u64 sclp_facilities; 101extern u64 sclp_facilities;
102
102#define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL) 103#define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL)
103#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL) 104#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL)
104#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL) 105#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL)
@@ -179,6 +180,10 @@ void sclp_sdias_exit(void);
179extern int sclp_console_pages; 180extern int sclp_console_pages;
180extern int sclp_console_drop; 181extern int sclp_console_drop;
181extern unsigned long sclp_console_full; 182extern unsigned long sclp_console_full;
183extern u8 sclp_fac84;
184extern unsigned long long sclp_rzm;
185extern unsigned long long sclp_rnmax;
186extern __initdata int sclp_early_read_info_sccb_valid;
182 187
183/* useful inlines */ 188/* useful inlines */
184 189
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 77df9cb00688..eaa21d542c5c 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -28,168 +28,6 @@
28 28
29#include "sclp.h" 29#include "sclp.h"
30 30
31#define SCLP_CMDW_READ_SCP_INFO 0x00020001
32#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
33
34struct read_info_sccb {
35 struct sccb_header header; /* 0-7 */
36 u16 rnmax; /* 8-9 */
37 u8 rnsize; /* 10 */
38 u8 _reserved0[24 - 11]; /* 11-15 */
39 u8 loadparm[8]; /* 24-31 */
40 u8 _reserved1[48 - 32]; /* 32-47 */
41 u64 facilities; /* 48-55 */
42 u8 _reserved2[84 - 56]; /* 56-83 */
43 u8 fac84; /* 84 */
44 u8 fac85; /* 85 */
45 u8 _reserved3[91 - 86]; /* 86-90 */
46 u8 flags; /* 91 */
47 u8 _reserved4[100 - 92]; /* 92-99 */
48 u32 rnsize2; /* 100-103 */
49 u64 rnmax2; /* 104-111 */
50 u8 _reserved5[4096 - 112]; /* 112-4095 */
51} __attribute__((packed, aligned(PAGE_SIZE)));
52
53static struct init_sccb __initdata early_event_mask_sccb __aligned(PAGE_SIZE);
54static struct read_info_sccb __initdata early_read_info_sccb;
55static int __initdata early_read_info_sccb_valid;
56
57u64 sclp_facilities;
58static u8 sclp_fac84;
59static unsigned long long rzm;
60static unsigned long long rnmax;
61
62static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
63{
64 int rc;
65
66 __ctl_set_bit(0, 9);
67 rc = sclp_service_call(cmd, sccb);
68 if (rc)
69 goto out;
70 __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
71 PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
72 local_irq_disable();
73out:
74 /* Contents of the sccb might have changed. */
75 barrier();
76 __ctl_clear_bit(0, 9);
77 return rc;
78}
79
80static void __init sclp_read_info_early(void)
81{
82 int rc;
83 int i;
84 struct read_info_sccb *sccb;
85 sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
86 SCLP_CMDW_READ_SCP_INFO};
87
88 sccb = &early_read_info_sccb;
89 for (i = 0; i < ARRAY_SIZE(commands); i++) {
90 do {
91 memset(sccb, 0, sizeof(*sccb));
92 sccb->header.length = sizeof(*sccb);
93 sccb->header.function_code = 0x80;
94 sccb->header.control_mask[2] = 0x80;
95 rc = sclp_cmd_sync_early(commands[i], sccb);
96 } while (rc == -EBUSY);
97
98 if (rc)
99 break;
100 if (sccb->header.response_code == 0x10) {
101 early_read_info_sccb_valid = 1;
102 break;
103 }
104 if (sccb->header.response_code != 0x1f0)
105 break;
106 }
107}
108
109static void __init sclp_event_mask_early(void)
110{
111 struct init_sccb *sccb = &early_event_mask_sccb;
112 int rc;
113
114 do {
115 memset(sccb, 0, sizeof(*sccb));
116 sccb->header.length = sizeof(*sccb);
117 sccb->mask_length = sizeof(sccb_mask_t);
118 rc = sclp_cmd_sync_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
119 } while (rc == -EBUSY);
120}
121
122void __init sclp_facilities_detect(void)
123{
124 struct read_info_sccb *sccb;
125
126 sclp_read_info_early();
127 if (!early_read_info_sccb_valid)
128 return;
129
130 sccb = &early_read_info_sccb;
131 sclp_facilities = sccb->facilities;
132 sclp_fac84 = sccb->fac84;
133 if (sccb->fac85 & 0x02)
134 S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
135 rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
136 rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
137 rzm <<= 20;
138
139 sclp_event_mask_early();
140}
141
142bool __init sclp_has_linemode(void)
143{
144 struct init_sccb *sccb = &early_event_mask_sccb;
145
146 if (sccb->header.response_code != 0x20)
147 return 0;
148 if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK)))
149 return 0;
150 if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
151 return 0;
152 return 1;
153}
154
155bool __init sclp_has_vt220(void)
156{
157 struct init_sccb *sccb = &early_event_mask_sccb;
158
159 if (sccb->header.response_code != 0x20)
160 return 0;
161 if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
162 return 1;
163 return 0;
164}
165
166unsigned long long sclp_get_rnmax(void)
167{
168 return rnmax;
169}
170
171unsigned long long sclp_get_rzm(void)
172{
173 return rzm;
174}
175
176/*
177 * This function will be called after sclp_facilities_detect(), which gets
178 * called from early.c code. Therefore the sccb should have valid contents.
179 */
180void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
181{
182 struct read_info_sccb *sccb;
183
184 if (!early_read_info_sccb_valid)
185 return;
186 sccb = &early_read_info_sccb;
187 info->is_valid = 1;
188 if (sccb->flags & 0x2)
189 info->has_dump = 1;
190 memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
191}
192
193static void sclp_sync_callback(struct sclp_req *req, void *data) 31static void sclp_sync_callback(struct sclp_req *req, void *data)
194{ 32{
195 struct completion *completion = data; 33 struct completion *completion = data;
@@ -356,14 +194,14 @@ struct assign_storage_sccb {
356 194
357int arch_get_memory_phys_device(unsigned long start_pfn) 195int arch_get_memory_phys_device(unsigned long start_pfn)
358{ 196{
359 if (!rzm) 197 if (!sclp_rzm)
360 return 0; 198 return 0;
361 return PFN_PHYS(start_pfn) >> ilog2(rzm); 199 return PFN_PHYS(start_pfn) >> ilog2(sclp_rzm);
362} 200}
363 201
364static unsigned long long rn2addr(u16 rn) 202static unsigned long long rn2addr(u16 rn)
365{ 203{
366 return (unsigned long long) (rn - 1) * rzm; 204 return (unsigned long long) (rn - 1) * sclp_rzm;
367} 205}
368 206
369static int do_assign_storage(sclp_cmdw_t cmd, u16 rn) 207static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
@@ -404,7 +242,7 @@ static int sclp_assign_storage(u16 rn)
404 if (rc) 242 if (rc)
405 return rc; 243 return rc;
406 start = rn2addr(rn); 244 start = rn2addr(rn);
407 storage_key_init_range(start, start + rzm); 245 storage_key_init_range(start, start + sclp_rzm);
408 return 0; 246 return 0;
409} 247}
410 248
@@ -462,7 +300,7 @@ static int sclp_mem_change_state(unsigned long start, unsigned long size,
462 istart = rn2addr(incr->rn); 300 istart = rn2addr(incr->rn);
463 if (start + size - 1 < istart) 301 if (start + size - 1 < istart)
464 break; 302 break;
465 if (start > istart + rzm - 1) 303 if (start > istart + sclp_rzm - 1)
466 continue; 304 continue;
467 if (online) 305 if (online)
468 rc |= sclp_assign_storage(incr->rn); 306 rc |= sclp_assign_storage(incr->rn);
@@ -526,7 +364,7 @@ static void __init add_memory_merged(u16 rn)
526 if (!first_rn) 364 if (!first_rn)
527 goto skip_add; 365 goto skip_add;
528 start = rn2addr(first_rn); 366 start = rn2addr(first_rn);
529 size = (unsigned long long ) num * rzm; 367 size = (unsigned long long) num * sclp_rzm;
530 if (start >= VMEM_MAX_PHYS) 368 if (start >= VMEM_MAX_PHYS)
531 goto skip_add; 369 goto skip_add;
532 if (start + size > VMEM_MAX_PHYS) 370 if (start + size > VMEM_MAX_PHYS)
@@ -574,7 +412,7 @@ static void __init insert_increment(u16 rn, int standby, int assigned)
574 } 412 }
575 if (!assigned) 413 if (!assigned)
576 new_incr->rn = last_rn + 1; 414 new_incr->rn = last_rn + 1;
577 if (new_incr->rn > rnmax) { 415 if (new_incr->rn > sclp_rnmax) {
578 kfree(new_incr); 416 kfree(new_incr);
579 return; 417 return;
580 } 418 }
@@ -617,7 +455,7 @@ static int __init sclp_detect_standby_memory(void)
617 455
618 if (OLDMEM_BASE) /* No standby memory in kdump mode */ 456 if (OLDMEM_BASE) /* No standby memory in kdump mode */
619 return 0; 457 return 0;
620 if (!early_read_info_sccb_valid) 458 if (!sclp_early_read_info_sccb_valid)
621 return 0; 459 return 0;
622 if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) 460 if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
623 return 0; 461 return 0;
@@ -661,7 +499,7 @@ static int __init sclp_detect_standby_memory(void)
661 } 499 }
662 if (rc || list_empty(&sclp_mem_list)) 500 if (rc || list_empty(&sclp_mem_list))
663 goto out; 501 goto out;
664 for (i = 1; i <= rnmax - assigned; i++) 502 for (i = 1; i <= sclp_rnmax - assigned; i++)
665 insert_increment(0, 1, 0); 503 insert_increment(0, 1, 0);
666 rc = register_memory_notifier(&sclp_mem_nb); 504 rc = register_memory_notifier(&sclp_mem_nb);
667 if (rc) 505 if (rc)
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
new file mode 100644
index 000000000000..1465e9563101
--- /dev/null
+++ b/drivers/s390/char/sclp_early.c
@@ -0,0 +1,263 @@
1/*
2 * SCLP early driver
3 *
4 * Copyright IBM Corp. 2013
5 */
6
7#define KMSG_COMPONENT "sclp_early"
8#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
9
10#include <asm/ctl_reg.h>
11#include <asm/sclp.h>
12#include <asm/ipl.h>
13#include "sclp_sdias.h"
14#include "sclp.h"
15
16#define SCLP_CMDW_READ_SCP_INFO 0x00020001
17#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
18
19struct read_info_sccb {
20 struct sccb_header header; /* 0-7 */
21 u16 rnmax; /* 8-9 */
22 u8 rnsize; /* 10 */
23 u8 _reserved0[24 - 11]; /* 11-15 */
24 u8 loadparm[8]; /* 24-31 */
25 u8 _reserved1[48 - 32]; /* 32-47 */
26 u64 facilities; /* 48-55 */
27 u8 _reserved2[84 - 56]; /* 56-83 */
28 u8 fac84; /* 84 */
29 u8 fac85; /* 85 */
30 u8 _reserved3[91 - 86]; /* 86-90 */
31 u8 flags; /* 91 */
32 u8 _reserved4[100 - 92]; /* 92-99 */
33 u32 rnsize2; /* 100-103 */
34 u64 rnmax2; /* 104-111 */
35 u8 _reserved5[4096 - 112]; /* 112-4095 */
36} __packed __aligned(PAGE_SIZE);
37
38static __initdata struct read_info_sccb early_read_info_sccb;
39static __initdata char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE);
40static unsigned long sclp_hsa_size;
41
42__initdata int sclp_early_read_info_sccb_valid;
43u64 sclp_facilities;
44u8 sclp_fac84;
45unsigned long long sclp_rzm;
46unsigned long long sclp_rnmax;
47
48static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
49{
50 int rc;
51
52 __ctl_set_bit(0, 9);
53 rc = sclp_service_call(cmd, sccb);
54 if (rc)
55 goto out;
56 __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
57 PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
58 local_irq_disable();
59out:
60 /* Contents of the sccb might have changed. */
61 barrier();
62 __ctl_clear_bit(0, 9);
63 return rc;
64}
65
66static void __init sclp_read_info_early(void)
67{
68 int rc;
69 int i;
70 struct read_info_sccb *sccb;
71 sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
72 SCLP_CMDW_READ_SCP_INFO};
73
74 sccb = &early_read_info_sccb;
75 for (i = 0; i < ARRAY_SIZE(commands); i++) {
76 do {
77 memset(sccb, 0, sizeof(*sccb));
78 sccb->header.length = sizeof(*sccb);
79 sccb->header.function_code = 0x80;
80 sccb->header.control_mask[2] = 0x80;
81 rc = sclp_cmd_sync_early(commands[i], sccb);
82 } while (rc == -EBUSY);
83
84 if (rc)
85 break;
86 if (sccb->header.response_code == 0x10) {
87 sclp_early_read_info_sccb_valid = 1;
88 break;
89 }
90 if (sccb->header.response_code != 0x1f0)
91 break;
92 }
93}
94
95static void __init sclp_facilities_detect(void)
96{
97 struct read_info_sccb *sccb;
98
99 sclp_read_info_early();
100 if (!sclp_early_read_info_sccb_valid)
101 return;
102
103 sccb = &early_read_info_sccb;
104 sclp_facilities = sccb->facilities;
105 sclp_fac84 = sccb->fac84;
106 if (sccb->fac85 & 0x02)
107 S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
108 sclp_rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
109 sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
110 sclp_rzm <<= 20;
111}
112
113bool __init sclp_has_linemode(void)
114{
115 struct init_sccb *sccb = (void *) &sccb_early;
116
117 if (sccb->header.response_code != 0x20)
118 return 0;
119 if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK)))
120 return 0;
121 if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
122 return 0;
123 return 1;
124}
125
126bool __init sclp_has_vt220(void)
127{
128 struct init_sccb *sccb = (void *) &sccb_early;
129
130 if (sccb->header.response_code != 0x20)
131 return 0;
132 if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
133 return 1;
134 return 0;
135}
136
137unsigned long long sclp_get_rnmax(void)
138{
139 return sclp_rnmax;
140}
141
142unsigned long long sclp_get_rzm(void)
143{
144 return sclp_rzm;
145}
146
147/*
148 * This function will be called after sclp_facilities_detect(), which gets
149 * called from early.c code. Therefore the sccb should have valid contents.
150 */
151void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
152{
153 struct read_info_sccb *sccb;
154
155 if (!sclp_early_read_info_sccb_valid)
156 return;
157 sccb = &early_read_info_sccb;
158 info->is_valid = 1;
159 if (sccb->flags & 0x2)
160 info->has_dump = 1;
161 memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
162}
163
164static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb)
165{
166 int rc;
167
168 do {
169 rc = sclp_cmd_sync_early(cmd, sccb);
170 } while (rc == -EBUSY);
171
172 if (rc)
173 return -EIO;
174 if (((struct sccb_header *) sccb)->response_code != 0x0020)
175 return -EIO;
176 return 0;
177}
178
179static void __init sccb_init_eq_size(struct sdias_sccb *sccb)
180{
181 memset(sccb, 0, sizeof(*sccb));
182
183 sccb->hdr.length = sizeof(*sccb);
184 sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
185 sccb->evbuf.hdr.type = EVTYP_SDIAS;
186 sccb->evbuf.event_qual = SDIAS_EQ_SIZE;
187 sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
188 sccb->evbuf.event_id = 4712;
189 sccb->evbuf.dbs = 1;
190}
191
192static int __init sclp_set_event_mask(unsigned long receive_mask,
193 unsigned long send_mask)
194{
195 struct init_sccb *sccb = (void *) &sccb_early;
196
197 memset(sccb, 0, sizeof(*sccb));
198 sccb->header.length = sizeof(*sccb);
199 sccb->mask_length = sizeof(sccb_mask_t);
200 sccb->receive_mask = receive_mask;
201 sccb->send_mask = send_mask;
202 return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
203}
204
205static long __init sclp_hsa_size_init(void)
206{
207 struct sdias_sccb *sccb = (void *) &sccb_early;
208
209 sccb_init_eq_size(sccb);
210 if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
211 return -EIO;
212 if (sccb->evbuf.blk_cnt != 0)
213 return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
214 return 0;
215}
216
217static long __init sclp_hsa_copy_wait(void)
218{
219 struct sccb_header *sccb = (void *) &sccb_early;
220
221 memset(sccb, 0, PAGE_SIZE);
222 sccb->length = PAGE_SIZE;
223 if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb))
224 return -EIO;
225 return (((struct sdias_sccb *) sccb)->evbuf.blk_cnt - 1) * PAGE_SIZE;
226}
227
228unsigned long sclp_get_hsa_size(void)
229{
230 return sclp_hsa_size;
231}
232
233static void __init sclp_hsa_size_detect(void)
234{
235 long size;
236
237 /* First try synchronous interface (LPAR) */
238 if (sclp_set_event_mask(0, 0x40000010))
239 return;
240 size = sclp_hsa_size_init();
241 if (size < 0)
242 return;
243 if (size != 0)
244 goto out;
245 /* Then try asynchronous interface (z/VM) */
246 if (sclp_set_event_mask(0x00000010, 0x40000010))
247 return;
248 size = sclp_hsa_size_init();
249 if (size < 0)
250 return;
251 size = sclp_hsa_copy_wait();
252 if (size < 0)
253 return;
254out:
255 sclp_hsa_size = size;
256}
257
258void __init sclp_early_detect(void)
259{
260 sclp_facilities_detect();
261 sclp_hsa_size_detect();
262 sclp_set_event_mask(0, 0);
263}
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index b1032931a1c4..561a0414b352 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Sclp "store data in absolut storage" 2 * SCLP "store data in absolute storage"
3 * 3 *
4 * Copyright IBM Corp. 2003, 2007 4 * Copyright IBM Corp. 2003, 2013
5 * Author(s): Michael Holzheu 5 * Author(s): Michael Holzheu
6 */ 6 */
7 7
@@ -14,6 +14,7 @@
14#include <asm/debug.h> 14#include <asm/debug.h>
15#include <asm/ipl.h> 15#include <asm/ipl.h>
16 16
17#include "sclp_sdias.h"
17#include "sclp.h" 18#include "sclp.h"
18#include "sclp_rw.h" 19#include "sclp_rw.h"
19 20
@@ -22,46 +23,12 @@
22#define SDIAS_RETRIES 300 23#define SDIAS_RETRIES 300
23#define SDIAS_SLEEP_TICKS 50 24#define SDIAS_SLEEP_TICKS 50
24 25
25#define EQ_STORE_DATA 0x0
26#define EQ_SIZE 0x1
27#define DI_FCP_DUMP 0x0
28#define ASA_SIZE_32 0x0
29#define ASA_SIZE_64 0x1
30#define EVSTATE_ALL_STORED 0x0
31#define EVSTATE_NO_DATA 0x3
32#define EVSTATE_PART_STORED 0x10
33
34static struct debug_info *sdias_dbf; 26static struct debug_info *sdias_dbf;
35 27
36static struct sclp_register sclp_sdias_register = { 28static struct sclp_register sclp_sdias_register = {
37 .send_mask = EVTYP_SDIAS_MASK, 29 .send_mask = EVTYP_SDIAS_MASK,
38}; 30};
39 31
40struct sdias_evbuf {
41 struct evbuf_header hdr;
42 u8 event_qual;
43 u8 data_id;
44 u64 reserved2;
45 u32 event_id;
46 u16 reserved3;
47 u8 asa_size;
48 u8 event_status;
49 u32 reserved4;
50 u32 blk_cnt;
51 u64 asa;
52 u32 reserved5;
53 u32 fbn;
54 u32 reserved6;
55 u32 lbn;
56 u16 reserved7;
57 u16 dbs;
58} __attribute__((packed));
59
60struct sdias_sccb {
61 struct sccb_header hdr;
62 struct sdias_evbuf evbuf;
63} __attribute__((packed));
64
65static struct sdias_sccb sccb __attribute__((aligned(4096))); 32static struct sdias_sccb sccb __attribute__((aligned(4096)));
66static struct sdias_evbuf sdias_evbuf; 33static struct sdias_evbuf sdias_evbuf;
67 34
@@ -148,8 +115,8 @@ int sclp_sdias_blk_count(void)
148 sccb.hdr.length = sizeof(sccb); 115 sccb.hdr.length = sizeof(sccb);
149 sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); 116 sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
150 sccb.evbuf.hdr.type = EVTYP_SDIAS; 117 sccb.evbuf.hdr.type = EVTYP_SDIAS;
151 sccb.evbuf.event_qual = EQ_SIZE; 118 sccb.evbuf.event_qual = SDIAS_EQ_SIZE;
152 sccb.evbuf.data_id = DI_FCP_DUMP; 119 sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP;
153 sccb.evbuf.event_id = 4712; 120 sccb.evbuf.event_id = 4712;
154 sccb.evbuf.dbs = 1; 121 sccb.evbuf.dbs = 1;
155 122
@@ -208,13 +175,13 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
208 sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); 175 sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
209 sccb.evbuf.hdr.type = EVTYP_SDIAS; 176 sccb.evbuf.hdr.type = EVTYP_SDIAS;
210 sccb.evbuf.hdr.flags = 0; 177 sccb.evbuf.hdr.flags = 0;
211 sccb.evbuf.event_qual = EQ_STORE_DATA; 178 sccb.evbuf.event_qual = SDIAS_EQ_STORE_DATA;
212 sccb.evbuf.data_id = DI_FCP_DUMP; 179 sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP;
213 sccb.evbuf.event_id = 4712; 180 sccb.evbuf.event_id = 4712;
214#ifdef CONFIG_64BIT 181#ifdef CONFIG_64BIT
215 sccb.evbuf.asa_size = ASA_SIZE_64; 182 sccb.evbuf.asa_size = SDIAS_ASA_SIZE_64;
216#else 183#else
217 sccb.evbuf.asa_size = ASA_SIZE_32; 184 sccb.evbuf.asa_size = SDIAS_ASA_SIZE_32;
218#endif 185#endif
219 sccb.evbuf.event_status = 0; 186 sccb.evbuf.event_status = 0;
220 sccb.evbuf.blk_cnt = nr_blks; 187 sccb.evbuf.blk_cnt = nr_blks;
@@ -240,20 +207,19 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
240 } 207 }
241 208
242 switch (sdias_evbuf.event_status) { 209 switch (sdias_evbuf.event_status) {
243 case EVSTATE_ALL_STORED: 210 case SDIAS_EVSTATE_ALL_STORED:
244 TRACE("all stored\n"); 211 TRACE("all stored\n");
245 break; 212 break;
246 case EVSTATE_PART_STORED: 213 case SDIAS_EVSTATE_PART_STORED:
247 TRACE("part stored: %i\n", sdias_evbuf.blk_cnt); 214 TRACE("part stored: %i\n", sdias_evbuf.blk_cnt);
248 break; 215 break;
249 case EVSTATE_NO_DATA: 216 case SDIAS_EVSTATE_NO_DATA:
250 TRACE("no data\n"); 217 TRACE("no data\n");
251 /* fall through */ 218 /* fall through */
252 default: 219 default:
253 pr_err("Error from SCLP while copying hsa. " 220 pr_err("Error from SCLP while copying hsa. Event status = %x\n",
254 "Event status = %x\n", 221 sdias_evbuf.event_status);
255 sdias_evbuf.event_status); 222 rc = -EIO;
256 rc = -EIO;
257 } 223 }
258out: 224out:
259 mutex_unlock(&sdias_mutex); 225 mutex_unlock(&sdias_mutex);
diff --git a/drivers/s390/char/sclp_sdias.h b/drivers/s390/char/sclp_sdias.h
new file mode 100644
index 000000000000..f2431c414150
--- /dev/null
+++ b/drivers/s390/char/sclp_sdias.h
@@ -0,0 +1,46 @@
1/*
2 * SCLP "store data in absolute storage"
3 *
4 * Copyright IBM Corp. 2003, 2013
5 */
6
7#ifndef SCLP_SDIAS_H
8#define SCLP_SDIAS_H
9
10#include "sclp.h"
11
12#define SDIAS_EQ_STORE_DATA 0x0
13#define SDIAS_EQ_SIZE 0x1
14#define SDIAS_DI_FCP_DUMP 0x0
15#define SDIAS_ASA_SIZE_32 0x0
16#define SDIAS_ASA_SIZE_64 0x1
17#define SDIAS_EVSTATE_ALL_STORED 0x0
18#define SDIAS_EVSTATE_NO_DATA 0x3
19#define SDIAS_EVSTATE_PART_STORED 0x10
20
21struct sdias_evbuf {
22 struct evbuf_header hdr;
23 u8 event_qual;
24 u8 data_id;
25 u64 reserved2;
26 u32 event_id;
27 u16 reserved3;
28 u8 asa_size;
29 u8 event_status;
30 u32 reserved4;
31 u32 blk_cnt;
32 u64 asa;
33 u32 reserved5;
34 u32 fbn;
35 u32 reserved6;
36 u32 lbn;
37 u16 reserved7;
38 u16 dbs;
39} __packed;
40
41struct sdias_sccb {
42 struct sccb_header hdr;
43 struct sdias_evbuf evbuf;
44} __packed;
45
46#endif /* SCLP_SDIAS_H */
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index ffb1fcf0bf5b..3d8e4d63f514 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -328,9 +328,9 @@ static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
328 mem_offs = 0; 328 mem_offs = 0;
329 329
330 /* Copy from HSA data */ 330 /* Copy from HSA data */
331 if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) { 331 if (*ppos < sclp_get_hsa_size() + HEADER_SIZE) {
332 size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE 332 size = min((count - hdr_count),
333 - mem_start)); 333 (size_t) (sclp_get_hsa_size() - mem_start));
334 rc = memcpy_hsa_user(buf + hdr_count, mem_start, size); 334 rc = memcpy_hsa_user(buf + hdr_count, mem_start, size);
335 if (rc) 335 if (rc)
336 goto fail; 336 goto fail;
@@ -490,7 +490,7 @@ static ssize_t zcore_hsa_read(struct file *filp, char __user *buf,
490 static char str[18]; 490 static char str[18];
491 491
492 if (hsa_available) 492 if (hsa_available)
493 snprintf(str, sizeof(str), "%lx\n", ZFCPDUMP_HSA_SIZE); 493 snprintf(str, sizeof(str), "%lx\n", sclp_get_hsa_size());
494 else 494 else
495 snprintf(str, sizeof(str), "0\n"); 495 snprintf(str, sizeof(str), "0\n");
496 return simple_read_from_buffer(buf, count, ppos, str, strlen(str)); 496 return simple_read_from_buffer(buf, count, ppos, str, strlen(str));
@@ -584,17 +584,9 @@ static int __init sys_info_init(enum arch_id arch, unsigned long mem_end)
584 584
585static int __init check_sdias(void) 585static int __init check_sdias(void)
586{ 586{
587 int rc, act_hsa_size; 587 if (!sclp_get_hsa_size()) {
588
589 rc = sclp_sdias_blk_count();
590 if (rc < 0) {
591 TRACE("Could not determine HSA size\n"); 588 TRACE("Could not determine HSA size\n");
592 return rc; 589 return -ENODEV;
593 }
594 act_hsa_size = (rc - 1) * PAGE_SIZE;
595 if (act_hsa_size < ZFCPDUMP_HSA_SIZE) {
596 TRACE("HSA size too small: %i\n", act_hsa_size);
597 return -EINVAL;
598 } 590 }
599 return 0; 591 return 0;
600} 592}
@@ -662,7 +654,7 @@ static int __init zcore_reipl_init(void)
662 ipl_block = (void *) __get_free_page(GFP_KERNEL); 654 ipl_block = (void *) __get_free_page(GFP_KERNEL);
663 if (!ipl_block) 655 if (!ipl_block)
664 return -ENOMEM; 656 return -ENOMEM;
665 if (ipib_info.ipib < ZFCPDUMP_HSA_SIZE) 657 if (ipib_info.ipib < sclp_get_hsa_size())
666 rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE); 658 rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
667 else 659 else
668 rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE); 660 rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE);
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index aca7bfc113aa..3a2ee4a740b4 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -190,7 +190,7 @@ static struct subchannel *eadm_get_idle_sch(void)
190 return NULL; 190 return NULL;
191} 191}
192 192
193static int eadm_start_aob(struct aob *aob) 193int eadm_start_aob(struct aob *aob)
194{ 194{
195 struct eadm_private *private; 195 struct eadm_private *private;
196 struct subchannel *sch; 196 struct subchannel *sch;
@@ -218,6 +218,7 @@ out_unlock:
218 218
219 return ret; 219 return ret;
220} 220}
221EXPORT_SYMBOL_GPL(eadm_start_aob);
221 222
222static int eadm_subchannel_probe(struct subchannel *sch) 223static int eadm_subchannel_probe(struct subchannel *sch)
223{ 224{
@@ -380,11 +381,6 @@ static struct css_driver eadm_subchannel_driver = {
380 .restore = eadm_subchannel_restore, 381 .restore = eadm_subchannel_restore,
381}; 382};
382 383
383static struct eadm_ops eadm_ops = {
384 .eadm_start = eadm_start_aob,
385 .owner = THIS_MODULE,
386};
387
388static int __init eadm_sch_init(void) 384static int __init eadm_sch_init(void)
389{ 385{
390 int ret; 386 int ret;
@@ -404,7 +400,6 @@ static int __init eadm_sch_init(void)
404 if (ret) 400 if (ret)
405 goto cleanup; 401 goto cleanup;
406 402
407 register_eadm_ops(&eadm_ops);
408 return ret; 403 return ret;
409 404
410cleanup: 405cleanup:
@@ -415,7 +410,6 @@ cleanup:
415 410
416static void __exit eadm_sch_exit(void) 411static void __exit eadm_sch_exit(void)
417{ 412{
418 unregister_eadm_ops(&eadm_ops);
419 css_driver_unregister(&eadm_subchannel_driver); 413 css_driver_unregister(&eadm_subchannel_driver);
420 isc_unregister(EADM_SCH_ISC); 414 isc_unregister(EADM_SCH_ISC);
421 debug_unregister(eadm_debug); 415 debug_unregister(eadm_debug);
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
index 46ec25632e8b..15268edc54ae 100644
--- a/drivers/s390/cio/scm.c
+++ b/drivers/s390/cio/scm.c
@@ -15,8 +15,6 @@
15#include "chsc.h" 15#include "chsc.h"
16 16
17static struct device *scm_root; 17static struct device *scm_root;
18static struct eadm_ops *eadm_ops;
19static DEFINE_MUTEX(eadm_ops_mutex);
20 18
21#define to_scm_dev(n) container_of(n, struct scm_device, dev) 19#define to_scm_dev(n) container_of(n, struct scm_device, dev)
22#define to_scm_drv(d) container_of(d, struct scm_driver, drv) 20#define to_scm_drv(d) container_of(d, struct scm_driver, drv)
@@ -73,49 +71,6 @@ void scm_driver_unregister(struct scm_driver *scmdrv)
73} 71}
74EXPORT_SYMBOL_GPL(scm_driver_unregister); 72EXPORT_SYMBOL_GPL(scm_driver_unregister);
75 73
76int scm_get_ref(void)
77{
78 int ret = 0;
79
80 mutex_lock(&eadm_ops_mutex);
81 if (!eadm_ops || !try_module_get(eadm_ops->owner))
82 ret = -ENOENT;
83 mutex_unlock(&eadm_ops_mutex);
84
85 return ret;
86}
87EXPORT_SYMBOL_GPL(scm_get_ref);
88
89void scm_put_ref(void)
90{
91 mutex_lock(&eadm_ops_mutex);
92 module_put(eadm_ops->owner);
93 mutex_unlock(&eadm_ops_mutex);
94}
95EXPORT_SYMBOL_GPL(scm_put_ref);
96
97void register_eadm_ops(struct eadm_ops *ops)
98{
99 mutex_lock(&eadm_ops_mutex);
100 eadm_ops = ops;
101 mutex_unlock(&eadm_ops_mutex);
102}
103EXPORT_SYMBOL_GPL(register_eadm_ops);
104
105void unregister_eadm_ops(struct eadm_ops *ops)
106{
107 mutex_lock(&eadm_ops_mutex);
108 eadm_ops = NULL;
109 mutex_unlock(&eadm_ops_mutex);
110}
111EXPORT_SYMBOL_GPL(unregister_eadm_ops);
112
113int scm_start_aob(struct aob *aob)
114{
115 return eadm_ops->eadm_start(aob);
116}
117EXPORT_SYMBOL_GPL(scm_start_aob);
118
119void scm_irq_handler(struct aob *aob, int error) 74void scm_irq_handler(struct aob *aob, int error)
120{ 75{
121 struct aob_rq_header *aobrq = (void *) aob->request.data; 76 struct aob_rq_header *aobrq = (void *) aob->request.data;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 5e1e12c0cf42..0a7325361d29 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2025,7 +2025,8 @@ static struct scsi_host_template driver_template = {
2025 .cmd_per_lun = TW_MAX_CMDS_PER_LUN, 2025 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
2026 .use_clustering = ENABLE_CLUSTERING, 2026 .use_clustering = ENABLE_CLUSTERING,
2027 .shost_attrs = twa_host_attrs, 2027 .shost_attrs = twa_host_attrs,
2028 .emulated = 1 2028 .emulated = 1,
2029 .no_write_same = 1,
2029}; 2030};
2030 2031
2031/* This function will probe and initialize a card */ 2032/* This function will probe and initialize a card */
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index c845bdbeb6c0..4de346017e9f 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1600,7 +1600,8 @@ static struct scsi_host_template driver_template = {
1600 .cmd_per_lun = TW_MAX_CMDS_PER_LUN, 1600 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
1601 .use_clustering = ENABLE_CLUSTERING, 1601 .use_clustering = ENABLE_CLUSTERING,
1602 .shost_attrs = twl_host_attrs, 1602 .shost_attrs = twl_host_attrs,
1603 .emulated = 1 1603 .emulated = 1,
1604 .no_write_same = 1,
1604}; 1605};
1605 1606
1606/* This function will probe and initialize a card */ 1607/* This function will probe and initialize a card */
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index b9276d10b25c..752624e6bc00 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -2279,7 +2279,8 @@ static struct scsi_host_template driver_template = {
2279 .cmd_per_lun = TW_MAX_CMDS_PER_LUN, 2279 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
2280 .use_clustering = ENABLE_CLUSTERING, 2280 .use_clustering = ENABLE_CLUSTERING,
2281 .shost_attrs = tw_host_attrs, 2281 .shost_attrs = tw_host_attrs,
2282 .emulated = 1 2282 .emulated = 1,
2283 .no_write_same = 1,
2283}; 2284};
2284 2285
2285/* This function will probe and initialize a card */ 2286/* This function will probe and initialize a card */
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index d85ac1a9d2c0..fbcd48d0bfc3 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -511,7 +511,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
511 goto cleanup; 511 goto cleanup;
512 } 512 }
513 513
514 if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))) { 514 if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
515 (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
515 rcode = -EINVAL; 516 rcode = -EINVAL;
516 goto cleanup; 517 goto cleanup;
517 } 518 }
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index f0d432c139d0..4921ed19a027 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1081,6 +1081,7 @@ static struct scsi_host_template aac_driver_template = {
1081#endif 1081#endif
1082 .use_clustering = ENABLE_CLUSTERING, 1082 .use_clustering = ENABLE_CLUSTERING,
1083 .emulated = 1, 1083 .emulated = 1,
1084 .no_write_same = 1,
1084}; 1085};
1085 1086
1086static void __aac_shutdown(struct aac_dev * aac) 1087static void __aac_shutdown(struct aac_dev * aac)
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 97fd450aff09..4f6a30b8e5f9 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -137,6 +137,7 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
137 .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN, 137 .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
138 .use_clustering = ENABLE_CLUSTERING, 138 .use_clustering = ENABLE_CLUSTERING,
139 .shost_attrs = arcmsr_host_attrs, 139 .shost_attrs = arcmsr_host_attrs,
140 .no_write_same = 1,
140}; 141};
141static struct pci_device_id arcmsr_device_id_table[] = { 142static struct pci_device_id arcmsr_device_id_table[] = {
142 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)}, 143 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index 94d5d0102f7d..42bcb970445a 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -296,6 +296,7 @@ wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn,
296struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, 296struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs,
297 u16 vf_id, wwn_t lpwwn); 297 u16 vf_id, wwn_t lpwwn);
298 298
299void bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, char *symname);
299void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port, 300void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port,
300 struct bfa_lport_info_s *port_info); 301 struct bfa_lport_info_s *port_info);
301void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port, 302void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port,
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 2f61a5af3658..f5e4e61a0fd7 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -1097,6 +1097,17 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
1097 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); 1097 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
1098} 1098}
1099 1099
1100void
1101bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port,
1102 char *symname)
1103{
1104 strcpy(port->port_cfg.sym_name.symname, symname);
1105
1106 if (bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online))
1107 bfa_fcs_lport_ns_util_send_rspn_id(
1108 BFA_FCS_GET_NS_FROM_PORT(port), NULL);
1109}
1110
1100/* 1111/*
1101 * fcs_lport_api 1112 * fcs_lport_api
1102 */ 1113 */
@@ -5140,9 +5151,6 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
5140 u8 *psymbl = &symbl[0]; 5151 u8 *psymbl = &symbl[0];
5141 int len; 5152 int len;
5142 5153
5143 if (!bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online))
5144 return;
5145
5146 /* Avoid sending RSPN in the following states. */ 5154 /* Avoid sending RSPN in the following states. */
5147 if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_offline) || 5155 if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_offline) ||
5148 bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_sending) || 5156 bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_sending) ||
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index e9a681d31223..40be670a1cbc 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -593,11 +593,8 @@ bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport)
593 return; 593 return;
594 594
595 spin_lock_irqsave(&bfad->bfad_lock, flags); 595 spin_lock_irqsave(&bfad->bfad_lock, flags);
596 if (strlen(sym_name) > 0) { 596 if (strlen(sym_name) > 0)
597 strcpy(fcs_vport->lport.port_cfg.sym_name.symname, sym_name); 597 bfa_fcs_lport_set_symname(&fcs_vport->lport, sym_name);
598 bfa_fcs_lport_ns_util_send_rspn_id(
599 BFA_FCS_GET_NS_FROM_PORT((&fcs_vport->lport)), NULL);
600 }
601 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 598 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
602} 599}
603 600
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index ee4fa40a50b1..ce5ef0190bad 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -4684,6 +4684,7 @@ static struct scsi_host_template gdth_template = {
4684 .cmd_per_lun = GDTH_MAXC_P_L, 4684 .cmd_per_lun = GDTH_MAXC_P_L,
4685 .unchecked_isa_dma = 1, 4685 .unchecked_isa_dma = 1,
4686 .use_clustering = ENABLE_CLUSTERING, 4686 .use_clustering = ENABLE_CLUSTERING,
4687 .no_write_same = 1,
4687}; 4688};
4688 4689
4689#ifdef CONFIG_ISA 4690#ifdef CONFIG_ISA
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index f334859024c0..f2c5005f312a 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -395,6 +395,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
395 shost->use_clustering = sht->use_clustering; 395 shost->use_clustering = sht->use_clustering;
396 shost->ordered_tag = sht->ordered_tag; 396 shost->ordered_tag = sht->ordered_tag;
397 shost->eh_deadline = shost_eh_deadline * HZ; 397 shost->eh_deadline = shost_eh_deadline * HZ;
398 shost->no_write_same = sht->no_write_same;
398 399
399 if (sht->supported_mode == MODE_UNKNOWN) 400 if (sht->supported_mode == MODE_UNKNOWN)
400 /* means we didn't set it ... default to INITIATOR */ 401 /* means we didn't set it ... default to INITIATOR */
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 22f6432eb475..20a5e6ecf945 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -561,6 +561,7 @@ static struct scsi_host_template hpsa_driver_template = {
561 .sdev_attrs = hpsa_sdev_attrs, 561 .sdev_attrs = hpsa_sdev_attrs,
562 .shost_attrs = hpsa_shost_attrs, 562 .shost_attrs = hpsa_shost_attrs,
563 .max_sectors = 8192, 563 .max_sectors = 8192,
564 .no_write_same = 1,
564}; 565};
565 566
566 567
@@ -1288,7 +1289,7 @@ static void complete_scsi_command(struct CommandList *cp)
1288 "has check condition: aborted command: " 1289 "has check condition: aborted command: "
1289 "ASC: 0x%x, ASCQ: 0x%x\n", 1290 "ASC: 0x%x, ASCQ: 0x%x\n",
1290 cp, asc, ascq); 1291 cp, asc, ascq);
1291 cmd->result = DID_SOFT_ERROR << 16; 1292 cmd->result |= DID_SOFT_ERROR << 16;
1292 break; 1293 break;
1293 } 1294 }
1294 /* Must be some other type of check condition */ 1295 /* Must be some other type of check condition */
@@ -4925,7 +4926,7 @@ reinit_after_soft_reset:
4925 hpsa_hba_inquiry(h); 4926 hpsa_hba_inquiry(h);
4926 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ 4927 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
4927 start_controller_lockup_detector(h); 4928 start_controller_lockup_detector(h);
4928 return 1; 4929 return 0;
4929 4930
4930clean4: 4931clean4:
4931 hpsa_free_sg_chain_blocks(h); 4932 hpsa_free_sg_chain_blocks(h);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 36ac1c34ce97..573f4128b6b6 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6305,7 +6305,8 @@ static struct scsi_host_template driver_template = {
6305 .use_clustering = ENABLE_CLUSTERING, 6305 .use_clustering = ENABLE_CLUSTERING,
6306 .shost_attrs = ipr_ioa_attrs, 6306 .shost_attrs = ipr_ioa_attrs,
6307 .sdev_attrs = ipr_dev_attrs, 6307 .sdev_attrs = ipr_dev_attrs,
6308 .proc_name = IPR_NAME 6308 .proc_name = IPR_NAME,
6309 .no_write_same = 1,
6309}; 6310};
6310 6311
6311/** 6312/**
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 8d5ea8a1e5a6..52a216f21ae5 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -374,6 +374,7 @@ static struct scsi_host_template ips_driver_template = {
374 .sg_tablesize = IPS_MAX_SG, 374 .sg_tablesize = IPS_MAX_SG,
375 .cmd_per_lun = 3, 375 .cmd_per_lun = 3,
376 .use_clustering = ENABLE_CLUSTERING, 376 .use_clustering = ENABLE_CLUSTERING,
377 .no_write_same = 1,
377}; 378};
378 379
379 380
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 161c98efade9..d2895836f9fa 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -211,7 +211,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
211 qc->tf.nsect = 0; 211 qc->tf.nsect = 0;
212 } 212 }
213 213
214 ata_tf_to_fis(&qc->tf, 1, 0, (u8*)&task->ata_task.fis); 214 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis);
215 task->uldd_task = qc; 215 task->uldd_task = qc;
216 if (ata_is_atapi(qc->tf.protocol)) { 216 if (ata_is_atapi(qc->tf.protocol)) {
217 memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len); 217 memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 90c95a3385d1..816db12ef5d5 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4244,6 +4244,7 @@ static struct scsi_host_template megaraid_template = {
4244 .eh_device_reset_handler = megaraid_reset, 4244 .eh_device_reset_handler = megaraid_reset,
4245 .eh_bus_reset_handler = megaraid_reset, 4245 .eh_bus_reset_handler = megaraid_reset,
4246 .eh_host_reset_handler = megaraid_reset, 4246 .eh_host_reset_handler = megaraid_reset,
4247 .no_write_same = 1,
4247}; 4248};
4248 4249
4249static int 4250static int
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index d1a4b82836ea..e2237a97cb9d 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -367,6 +367,7 @@ static struct scsi_host_template megaraid_template_g = {
367 .eh_host_reset_handler = megaraid_reset_handler, 367 .eh_host_reset_handler = megaraid_reset_handler,
368 .change_queue_depth = megaraid_change_queue_depth, 368 .change_queue_depth = megaraid_change_queue_depth,
369 .use_clustering = ENABLE_CLUSTERING, 369 .use_clustering = ENABLE_CLUSTERING,
370 .no_write_same = 1,
370 .sdev_attrs = megaraid_sdev_attrs, 371 .sdev_attrs = megaraid_sdev_attrs,
371 .shost_attrs = megaraid_shost_attrs, 372 .shost_attrs = megaraid_shost_attrs,
372}; 373};
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 0a743a5d1647..c99812bf2a73 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2148,6 +2148,7 @@ static struct scsi_host_template megasas_template = {
2148 .bios_param = megasas_bios_param, 2148 .bios_param = megasas_bios_param,
2149 .use_clustering = ENABLE_CLUSTERING, 2149 .use_clustering = ENABLE_CLUSTERING,
2150 .change_queue_depth = megasas_change_queue_depth, 2150 .change_queue_depth = megasas_change_queue_depth,
2151 .no_write_same = 1,
2151}; 2152};
2152 2153
2153/** 2154/**
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index f16ece91b94a..0a1296a87d66 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3403,6 +3403,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
3403 unsigned long flags; 3403 unsigned long flags;
3404 u8 deviceType = pPayload->sas_identify.dev_type; 3404 u8 deviceType = pPayload->sas_identify.dev_type;
3405 port->port_state = portstate; 3405 port->port_state = portstate;
3406 phy->phy_state = PHY_STATE_LINK_UP_SPC;
3406 PM8001_MSG_DBG(pm8001_ha, 3407 PM8001_MSG_DBG(pm8001_ha,
3407 pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n", 3408 pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",
3408 port_id, phy_id)); 3409 port_id, phy_id));
@@ -3483,6 +3484,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
3483 pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d," 3484 pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
3484 " phy id = %d\n", port_id, phy_id)); 3485 " phy id = %d\n", port_id, phy_id));
3485 port->port_state = portstate; 3486 port->port_state = portstate;
3487 phy->phy_state = PHY_STATE_LINK_UP_SPC;
3486 port->port_attached = 1; 3488 port->port_attached = 1;
3487 pm8001_get_lrate_mode(phy, link_rate); 3489 pm8001_get_lrate_mode(phy, link_rate);
3488 phy->phy_type |= PORT_TYPE_SATA; 3490 phy->phy_type |= PORT_TYPE_SATA;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index 6d91e2446542..e4867e690c84 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -131,6 +131,10 @@
131#define LINKRATE_30 (0x02 << 8) 131#define LINKRATE_30 (0x02 << 8)
132#define LINKRATE_60 (0x04 << 8) 132#define LINKRATE_60 (0x04 << 8)
133 133
134/* for phy state */
135
136#define PHY_STATE_LINK_UP_SPC 0x1
137
134/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */ 138/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */
135#define GSM_SM_BASE 0x4F0000 139#define GSM_SM_BASE 0x4F0000
136struct mpi_msg_hdr{ 140struct mpi_msg_hdr{
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 34f5f5ffef05..73a120d81b4d 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -175,20 +175,16 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
175static void pm8001_tasklet(unsigned long opaque) 175static void pm8001_tasklet(unsigned long opaque)
176{ 176{
177 struct pm8001_hba_info *pm8001_ha; 177 struct pm8001_hba_info *pm8001_ha;
178 u32 vec; 178 struct isr_param *irq_vector;
179 pm8001_ha = (struct pm8001_hba_info *)opaque; 179
180 irq_vector = (struct isr_param *)opaque;
181 pm8001_ha = irq_vector->drv_inst;
180 if (unlikely(!pm8001_ha)) 182 if (unlikely(!pm8001_ha))
181 BUG_ON(1); 183 BUG_ON(1);
182 vec = pm8001_ha->int_vector; 184 PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id);
183 PM8001_CHIP_DISP->isr(pm8001_ha, vec);
184} 185}
185#endif 186#endif
186 187
187static struct pm8001_hba_info *outq_to_hba(u8 *outq)
188{
189 return container_of((outq - *outq), struct pm8001_hba_info, outq[0]);
190}
191
192/** 188/**
193 * pm8001_interrupt_handler_msix - main MSIX interrupt handler. 189 * pm8001_interrupt_handler_msix - main MSIX interrupt handler.
194 * It obtains the vector number and calls the equivalent bottom 190 * It obtains the vector number and calls the equivalent bottom
@@ -198,18 +194,20 @@ static struct pm8001_hba_info *outq_to_hba(u8 *outq)
198 */ 194 */
199static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque) 195static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
200{ 196{
201 struct pm8001_hba_info *pm8001_ha = outq_to_hba(opaque); 197 struct isr_param *irq_vector;
202 u8 outq = *(u8 *)opaque; 198 struct pm8001_hba_info *pm8001_ha;
203 irqreturn_t ret = IRQ_HANDLED; 199 irqreturn_t ret = IRQ_HANDLED;
200 irq_vector = (struct isr_param *)opaque;
201 pm8001_ha = irq_vector->drv_inst;
202
204 if (unlikely(!pm8001_ha)) 203 if (unlikely(!pm8001_ha))
205 return IRQ_NONE; 204 return IRQ_NONE;
206 if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha)) 205 if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
207 return IRQ_NONE; 206 return IRQ_NONE;
208 pm8001_ha->int_vector = outq;
209#ifdef PM8001_USE_TASKLET 207#ifdef PM8001_USE_TASKLET
210 tasklet_schedule(&pm8001_ha->tasklet); 208 tasklet_schedule(&pm8001_ha->tasklet[irq_vector->irq_id]);
211#else 209#else
212 ret = PM8001_CHIP_DISP->isr(pm8001_ha, outq); 210 ret = PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id);
213#endif 211#endif
214 return ret; 212 return ret;
215} 213}
@@ -230,9 +228,8 @@ static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
230 if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha)) 228 if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
231 return IRQ_NONE; 229 return IRQ_NONE;
232 230
233 pm8001_ha->int_vector = 0;
234#ifdef PM8001_USE_TASKLET 231#ifdef PM8001_USE_TASKLET
235 tasklet_schedule(&pm8001_ha->tasklet); 232 tasklet_schedule(&pm8001_ha->tasklet[0]);
236#else 233#else
237 ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0); 234 ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0);
238#endif 235#endif
@@ -457,7 +454,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
457{ 454{
458 struct pm8001_hba_info *pm8001_ha; 455 struct pm8001_hba_info *pm8001_ha;
459 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 456 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
460 457 int j;
461 458
462 pm8001_ha = sha->lldd_ha; 459 pm8001_ha = sha->lldd_ha;
463 if (!pm8001_ha) 460 if (!pm8001_ha)
@@ -480,12 +477,14 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
480 pm8001_ha->iomb_size = IOMB_SIZE_SPC; 477 pm8001_ha->iomb_size = IOMB_SIZE_SPC;
481 478
482#ifdef PM8001_USE_TASKLET 479#ifdef PM8001_USE_TASKLET
483 /** 480 /* Tasklet for non msi-x interrupt handler */
484 * default tasklet for non msi-x interrupt handler/first msi-x 481 if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001))
485 * interrupt handler 482 tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet,
486 **/ 483 (unsigned long)&(pm8001_ha->irq_vector[0]));
487 tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, 484 else
488 (unsigned long)pm8001_ha); 485 for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
486 tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet,
487 (unsigned long)&(pm8001_ha->irq_vector[j]));
489#endif 488#endif
490 pm8001_ioremap(pm8001_ha); 489 pm8001_ioremap(pm8001_ha);
491 if (!pm8001_alloc(pm8001_ha, ent)) 490 if (!pm8001_alloc(pm8001_ha, ent))
@@ -733,19 +732,20 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
733 "pci_enable_msix request ret:%d no of intr %d\n", 732 "pci_enable_msix request ret:%d no of intr %d\n",
734 rc, pm8001_ha->number_of_intr)); 733 rc, pm8001_ha->number_of_intr));
735 734
736 for (i = 0; i < number_of_intr; i++)
737 pm8001_ha->outq[i] = i;
738 735
739 for (i = 0; i < number_of_intr; i++) { 736 for (i = 0; i < number_of_intr; i++) {
740 snprintf(intr_drvname[i], sizeof(intr_drvname[0]), 737 snprintf(intr_drvname[i], sizeof(intr_drvname[0]),
741 DRV_NAME"%d", i); 738 DRV_NAME"%d", i);
739 pm8001_ha->irq_vector[i].irq_id = i;
740 pm8001_ha->irq_vector[i].drv_inst = pm8001_ha;
741
742 if (request_irq(pm8001_ha->msix_entries[i].vector, 742 if (request_irq(pm8001_ha->msix_entries[i].vector,
743 pm8001_interrupt_handler_msix, flag, 743 pm8001_interrupt_handler_msix, flag,
744 intr_drvname[i], &pm8001_ha->outq[i])) { 744 intr_drvname[i], &(pm8001_ha->irq_vector[i]))) {
745 for (j = 0; j < i; j++) 745 for (j = 0; j < i; j++)
746 free_irq( 746 free_irq(
747 pm8001_ha->msix_entries[j].vector, 747 pm8001_ha->msix_entries[j].vector,
748 &pm8001_ha->outq[j]); 748 &(pm8001_ha->irq_vector[i]));
749 pci_disable_msix(pm8001_ha->pdev); 749 pci_disable_msix(pm8001_ha->pdev);
750 break; 750 break;
751 } 751 }
@@ -907,7 +907,7 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
907{ 907{
908 struct sas_ha_struct *sha = pci_get_drvdata(pdev); 908 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
909 struct pm8001_hba_info *pm8001_ha; 909 struct pm8001_hba_info *pm8001_ha;
910 int i; 910 int i, j;
911 pm8001_ha = sha->lldd_ha; 911 pm8001_ha = sha->lldd_ha;
912 sas_unregister_ha(sha); 912 sas_unregister_ha(sha);
913 sas_remove_host(pm8001_ha->shost); 913 sas_remove_host(pm8001_ha->shost);
@@ -921,13 +921,18 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
921 synchronize_irq(pm8001_ha->msix_entries[i].vector); 921 synchronize_irq(pm8001_ha->msix_entries[i].vector);
922 for (i = 0; i < pm8001_ha->number_of_intr; i++) 922 for (i = 0; i < pm8001_ha->number_of_intr; i++)
923 free_irq(pm8001_ha->msix_entries[i].vector, 923 free_irq(pm8001_ha->msix_entries[i].vector,
924 &pm8001_ha->outq[i]); 924 &(pm8001_ha->irq_vector[i]));
925 pci_disable_msix(pdev); 925 pci_disable_msix(pdev);
926#else 926#else
927 free_irq(pm8001_ha->irq, sha); 927 free_irq(pm8001_ha->irq, sha);
928#endif 928#endif
929#ifdef PM8001_USE_TASKLET 929#ifdef PM8001_USE_TASKLET
930 tasklet_kill(&pm8001_ha->tasklet); 930 /* For non-msix and msix interrupts */
931 if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001))
932 tasklet_kill(&pm8001_ha->tasklet[0]);
933 else
934 for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
935 tasklet_kill(&pm8001_ha->tasklet[j]);
931#endif 936#endif
932 pm8001_free(pm8001_ha); 937 pm8001_free(pm8001_ha);
933 kfree(sha->sas_phy); 938 kfree(sha->sas_phy);
@@ -948,7 +953,7 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
948{ 953{
949 struct sas_ha_struct *sha = pci_get_drvdata(pdev); 954 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
950 struct pm8001_hba_info *pm8001_ha; 955 struct pm8001_hba_info *pm8001_ha;
951 int i; 956 int i, j;
952 u32 device_state; 957 u32 device_state;
953 pm8001_ha = sha->lldd_ha; 958 pm8001_ha = sha->lldd_ha;
954 flush_workqueue(pm8001_wq); 959 flush_workqueue(pm8001_wq);
@@ -964,13 +969,18 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
964 synchronize_irq(pm8001_ha->msix_entries[i].vector); 969 synchronize_irq(pm8001_ha->msix_entries[i].vector);
965 for (i = 0; i < pm8001_ha->number_of_intr; i++) 970 for (i = 0; i < pm8001_ha->number_of_intr; i++)
966 free_irq(pm8001_ha->msix_entries[i].vector, 971 free_irq(pm8001_ha->msix_entries[i].vector,
967 &pm8001_ha->outq[i]); 972 &(pm8001_ha->irq_vector[i]));
968 pci_disable_msix(pdev); 973 pci_disable_msix(pdev);
969#else 974#else
970 free_irq(pm8001_ha->irq, sha); 975 free_irq(pm8001_ha->irq, sha);
971#endif 976#endif
972#ifdef PM8001_USE_TASKLET 977#ifdef PM8001_USE_TASKLET
973 tasklet_kill(&pm8001_ha->tasklet); 978 /* For non-msix and msix interrupts */
979 if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001))
980 tasklet_kill(&pm8001_ha->tasklet[0]);
981 else
982 for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
983 tasklet_kill(&pm8001_ha->tasklet[j]);
974#endif 984#endif
975 device_state = pci_choose_state(pdev, state); 985 device_state = pci_choose_state(pdev, state);
976 pm8001_printk("pdev=0x%p, slot=%s, entering " 986 pm8001_printk("pdev=0x%p, slot=%s, entering "
@@ -993,7 +1003,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
993 struct sas_ha_struct *sha = pci_get_drvdata(pdev); 1003 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
994 struct pm8001_hba_info *pm8001_ha; 1004 struct pm8001_hba_info *pm8001_ha;
995 int rc; 1005 int rc;
996 u8 i = 0; 1006 u8 i = 0, j;
997 u32 device_state; 1007 u32 device_state;
998 pm8001_ha = sha->lldd_ha; 1008 pm8001_ha = sha->lldd_ha;
999 device_state = pdev->current_state; 1009 device_state = pdev->current_state;
@@ -1033,10 +1043,14 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
1033 if (rc) 1043 if (rc)
1034 goto err_out_disable; 1044 goto err_out_disable;
1035#ifdef PM8001_USE_TASKLET 1045#ifdef PM8001_USE_TASKLET
1036 /* default tasklet for non msi-x interrupt handler/first msi-x 1046 /* Tasklet for non msi-x interrupt handler */
1037 * interrupt handler */ 1047 if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001))
1038 tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, 1048 tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet,
1039 (unsigned long)pm8001_ha); 1049 (unsigned long)&(pm8001_ha->irq_vector[0]));
1050 else
1051 for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
1052 tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet,
1053 (unsigned long)&(pm8001_ha->irq_vector[j]));
1040#endif 1054#endif
1041 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0); 1055 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
1042 if (pm8001_ha->chip_id != chip_8001) { 1056 if (pm8001_ha->chip_id != chip_8001) {
@@ -1169,6 +1183,7 @@ module_exit(pm8001_exit);
1169MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>"); 1183MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
1170MODULE_AUTHOR("Anand Kumar Santhanam <AnandKumar.Santhanam@pmcs.com>"); 1184MODULE_AUTHOR("Anand Kumar Santhanam <AnandKumar.Santhanam@pmcs.com>");
1171MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>"); 1185MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>");
1186MODULE_AUTHOR("Nikith Ganigarakoppal <Nikith.Ganigarakoppal@pmcs.com>");
1172MODULE_DESCRIPTION( 1187MODULE_DESCRIPTION(
1173 "PMC-Sierra PM8001/8081/8088/8089/8074/8076/8077 " 1188 "PMC-Sierra PM8001/8081/8088/8089/8074/8076/8077 "
1174 "SAS/SATA controller driver"); 1189 "SAS/SATA controller driver");
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index f4eb18e51631..f50ac44b950e 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -1098,15 +1098,17 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
1098 struct pm8001_tmf_task tmf_task; 1098 struct pm8001_tmf_task tmf_task;
1099 struct pm8001_device *pm8001_dev = dev->lldd_dev; 1099 struct pm8001_device *pm8001_dev = dev->lldd_dev;
1100 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); 1100 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
1101 DECLARE_COMPLETION_ONSTACK(completion_setstate);
1101 if (dev_is_sata(dev)) { 1102 if (dev_is_sata(dev)) {
1102 struct sas_phy *phy = sas_get_local_phy(dev); 1103 struct sas_phy *phy = sas_get_local_phy(dev);
1103 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , 1104 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
1104 dev, 1, 0); 1105 dev, 1, 0);
1105 rc = sas_phy_reset(phy, 1); 1106 rc = sas_phy_reset(phy, 1);
1106 sas_put_local_phy(phy); 1107 sas_put_local_phy(phy);
1108 pm8001_dev->setds_completion = &completion_setstate;
1107 rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, 1109 rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1108 pm8001_dev, 0x01); 1110 pm8001_dev, 0x01);
1109 msleep(2000); 1111 wait_for_completion(&completion_setstate);
1110 } else { 1112 } else {
1111 tmf_task.tmf = TMF_LU_RESET; 1113 tmf_task.tmf = TMF_LU_RESET;
1112 rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); 1114 rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 6037d477a183..6c5fd5ee22d3 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -466,6 +466,10 @@ struct pm8001_hba_memspace {
466 u64 membase; 466 u64 membase;
467 u32 memsize; 467 u32 memsize;
468}; 468};
469struct isr_param {
470 struct pm8001_hba_info *drv_inst;
471 u32 irq_id;
472};
469struct pm8001_hba_info { 473struct pm8001_hba_info {
470 char name[PM8001_NAME_LENGTH]; 474 char name[PM8001_NAME_LENGTH];
471 struct list_head list; 475 struct list_head list;
@@ -519,14 +523,13 @@ struct pm8001_hba_info {
519 int number_of_intr;/*will be used in remove()*/ 523 int number_of_intr;/*will be used in remove()*/
520#endif 524#endif
521#ifdef PM8001_USE_TASKLET 525#ifdef PM8001_USE_TASKLET
522 struct tasklet_struct tasklet; 526 struct tasklet_struct tasklet[PM8001_MAX_MSIX_VEC];
523#endif 527#endif
524 u32 logging_level; 528 u32 logging_level;
525 u32 fw_status; 529 u32 fw_status;
526 u32 smp_exp_mode; 530 u32 smp_exp_mode;
527 u32 int_vector;
528 const struct firmware *fw_image; 531 const struct firmware *fw_image;
529 u8 outq[PM8001_MAX_MSIX_VEC]; 532 struct isr_param irq_vector[PM8001_MAX_MSIX_VEC];
530}; 533};
531 534
532struct pm8001_work { 535struct pm8001_work {
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 8987b1706216..c950dc5c9943 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2894,6 +2894,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
2894 unsigned long flags; 2894 unsigned long flags;
2895 u8 deviceType = pPayload->sas_identify.dev_type; 2895 u8 deviceType = pPayload->sas_identify.dev_type;
2896 port->port_state = portstate; 2896 port->port_state = portstate;
2897 phy->phy_state = PHY_STATE_LINK_UP_SPCV;
2897 PM8001_MSG_DBG(pm8001_ha, pm8001_printk( 2898 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
2898 "portid:%d; phyid:%d; linkrate:%d; " 2899 "portid:%d; phyid:%d; linkrate:%d; "
2899 "portstate:%x; devicetype:%x\n", 2900 "portstate:%x; devicetype:%x\n",
@@ -2978,6 +2979,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
2978 port_id, phy_id, link_rate, portstate)); 2979 port_id, phy_id, link_rate, portstate));
2979 2980
2980 port->port_state = portstate; 2981 port->port_state = portstate;
2982 phy->phy_state = PHY_STATE_LINK_UP_SPCV;
2981 port->port_attached = 1; 2983 port->port_attached = 1;
2982 pm8001_get_lrate_mode(phy, link_rate); 2984 pm8001_get_lrate_mode(phy, link_rate);
2983 phy->phy_type |= PORT_TYPE_SATA; 2985 phy->phy_type |= PORT_TYPE_SATA;
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index c86816bea424..9970a385795d 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -215,6 +215,8 @@
215#define SAS_DOPNRJT_RTRY_TMO 128 215#define SAS_DOPNRJT_RTRY_TMO 128
216#define SAS_COPNRJT_RTRY_TMO 128 216#define SAS_COPNRJT_RTRY_TMO 128
217 217
218/* for phy state */
219#define PHY_STATE_LINK_UP_SPCV 0x2
218/* 220/*
219 Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second. 221 Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second.
220 Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128 222 Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index e43db7742047..be8ce54f99b2 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1404,11 +1404,22 @@ enum {
1404}; 1404};
1405#define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1) 1405#define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1)
1406 1406
1407static struct genl_multicast_group pmcraid_mcgrps[] = {
1408 { .name = "events", /* not really used - see ID discussion below */ },
1409};
1410
1407static struct genl_family pmcraid_event_family = { 1411static struct genl_family pmcraid_event_family = {
1408 .id = GENL_ID_GENERATE, 1412 /*
1413 * Due to prior multicast group abuse (the code having assumed that
1414 * the family ID can be used as a multicast group ID) we need to
1415 * statically allocate a family (and thus group) ID.
1416 */
1417 .id = GENL_ID_PMCRAID,
1409 .name = "pmcraid", 1418 .name = "pmcraid",
1410 .version = 1, 1419 .version = 1,
1411 .maxattr = PMCRAID_AEN_ATTR_MAX 1420 .maxattr = PMCRAID_AEN_ATTR_MAX,
1421 .mcgrps = pmcraid_mcgrps,
1422 .n_mcgrps = ARRAY_SIZE(pmcraid_mcgrps),
1412}; 1423};
1413 1424
1414/** 1425/**
@@ -1511,8 +1522,8 @@ static int pmcraid_notify_aen(
1511 return result; 1522 return result;
1512 } 1523 }
1513 1524
1514 result = 1525 result = genlmsg_multicast(&pmcraid_event_family, skb,
1515 genlmsg_multicast(skb, 0, pmcraid_event_family.id, GFP_ATOMIC); 1526 0, 0, GFP_ATOMIC);
1516 1527
1517 /* If there are no listeners, genlmsg_multicast may return non-zero 1528 /* If there are no listeners, genlmsg_multicast may return non-zero
1518 * value. 1529 * value.
@@ -4314,6 +4325,7 @@ static struct scsi_host_template pmcraid_host_template = {
4314 .this_id = -1, 4325 .this_id = -1,
4315 .sg_tablesize = PMCRAID_MAX_IOADLS, 4326 .sg_tablesize = PMCRAID_MAX_IOADLS,
4316 .max_sectors = PMCRAID_IOA_MAX_SECTORS, 4327 .max_sectors = PMCRAID_IOA_MAX_SECTORS,
4328 .no_write_same = 1,
4317 .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN, 4329 .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
4318 .use_clustering = ENABLE_CLUSTERING, 4330 .use_clustering = ENABLE_CLUSTERING,
4319 .shost_attrs = pmcraid_host_attrs, 4331 .shost_attrs = pmcraid_host_attrs,
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index f85b9e5c1f05..7eb19be35d46 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -330,7 +330,7 @@ static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
330 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 330 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
331 struct tcm_qla2xxx_tpg, se_tpg); 331 struct tcm_qla2xxx_tpg, se_tpg);
332 332
333 return QLA_TPG_ATTRIB(tpg)->generate_node_acls; 333 return tpg->tpg_attrib.generate_node_acls;
334} 334}
335 335
336static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg) 336static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
@@ -338,7 +338,7 @@ static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
338 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 338 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
339 struct tcm_qla2xxx_tpg, se_tpg); 339 struct tcm_qla2xxx_tpg, se_tpg);
340 340
341 return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls; 341 return tpg->tpg_attrib.cache_dynamic_acls;
342} 342}
343 343
344static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg) 344static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
@@ -346,7 +346,7 @@ static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
346 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 346 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
347 struct tcm_qla2xxx_tpg, se_tpg); 347 struct tcm_qla2xxx_tpg, se_tpg);
348 348
349 return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect; 349 return tpg->tpg_attrib.demo_mode_write_protect;
350} 350}
351 351
352static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg) 352static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
@@ -354,7 +354,7 @@ static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
354 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 354 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
355 struct tcm_qla2xxx_tpg, se_tpg); 355 struct tcm_qla2xxx_tpg, se_tpg);
356 356
357 return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect; 357 return tpg->tpg_attrib.prod_mode_write_protect;
358} 358}
359 359
360static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg) 360static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg)
@@ -362,7 +362,7 @@ static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg
362 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 362 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
363 struct tcm_qla2xxx_tpg, se_tpg); 363 struct tcm_qla2xxx_tpg, se_tpg);
364 364
365 return QLA_TPG_ATTRIB(tpg)->demo_mode_login_only; 365 return tpg->tpg_attrib.demo_mode_login_only;
366} 366}
367 367
368static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl( 368static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
@@ -847,7 +847,7 @@ static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \
847 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \ 847 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
848 struct tcm_qla2xxx_tpg, se_tpg); \ 848 struct tcm_qla2xxx_tpg, se_tpg); \
849 \ 849 \
850 return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name); \ 850 return sprintf(page, "%u\n", tpg->tpg_attrib.name); \
851} \ 851} \
852 \ 852 \
853static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \ 853static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \
@@ -1027,10 +1027,10 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
1027 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic 1027 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
1028 * NodeACLs 1028 * NodeACLs
1029 */ 1029 */
1030 QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1; 1030 tpg->tpg_attrib.generate_node_acls = 1;
1031 QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1; 1031 tpg->tpg_attrib.demo_mode_write_protect = 1;
1032 QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1; 1032 tpg->tpg_attrib.cache_dynamic_acls = 1;
1033 QLA_TPG_ATTRIB(tpg)->demo_mode_login_only = 1; 1033 tpg->tpg_attrib.demo_mode_login_only = 1;
1034 1034
1035 ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn, 1035 ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
1036 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); 1036 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
@@ -1830,16 +1830,16 @@ static int tcm_qla2xxx_register_configfs(void)
1830 /* 1830 /*
1831 * Setup default attribute lists for various fabric->tf_cit_tmpl 1831 * Setup default attribute lists for various fabric->tf_cit_tmpl
1832 */ 1832 */
1833 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; 1833 fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
1834 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs; 1834 fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
1835 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = 1835 fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs =
1836 tcm_qla2xxx_tpg_attrib_attrs; 1836 tcm_qla2xxx_tpg_attrib_attrs;
1837 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; 1837 fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
1838 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; 1838 fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
1839 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; 1839 fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
1840 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 1840 fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
1841 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 1841 fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
1842 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; 1842 fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
1843 /* 1843 /*
1844 * Register the fabric for use within TCM 1844 * Register the fabric for use within TCM
1845 */ 1845 */
@@ -1870,15 +1870,15 @@ static int tcm_qla2xxx_register_configfs(void)
1870 /* 1870 /*
1871 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl 1871 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
1872 */ 1872 */
1873 TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; 1873 npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
1874 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL; 1874 npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
1875 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; 1875 npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
1876 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL; 1876 npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
1877 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; 1877 npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
1878 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; 1878 npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
1879 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 1879 npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
1880 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 1880 npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
1881 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; 1881 npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
1882 /* 1882 /*
1883 * Register the npiv_fabric for use within TCM 1883 * Register the npiv_fabric for use within TCM
1884 */ 1884 */
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 329327528a55..771f7b816443 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -45,8 +45,6 @@ struct tcm_qla2xxx_tpg {
45 struct se_portal_group se_tpg; 45 struct se_portal_group se_tpg;
46}; 46};
47 47
48#define QLA_TPG_ATTRIB(tpg) (&(tpg)->tpg_attrib)
49
50struct tcm_qla2xxx_fc_loopid { 48struct tcm_qla2xxx_fc_loopid {
51 struct se_node_acl *se_nacl; 49 struct se_node_acl *se_nacl;
52}; 50};
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e6c4bff04339..69725f7c32c1 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2659,6 +2659,12 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
2659{ 2659{
2660 struct scsi_device *sdev = sdkp->device; 2660 struct scsi_device *sdev = sdkp->device;
2661 2661
2662 if (sdev->host->no_write_same) {
2663 sdev->no_write_same = 1;
2664
2665 return;
2666 }
2667
2662 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) { 2668 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
2663 /* too large values might cause issues with arcmsr */ 2669 /* too large values might cause issues with arcmsr */
2664 int vpd_buf_len = 64; 2670 int vpd_buf_len = 64;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 1a28f5632797..17d740427240 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1697,6 +1697,7 @@ static struct scsi_host_template scsi_driver = {
1697 .use_clustering = DISABLE_CLUSTERING, 1697 .use_clustering = DISABLE_CLUSTERING,
1698 /* Make sure we dont get a sg segment crosses a page boundary */ 1698 /* Make sure we dont get a sg segment crosses a page boundary */
1699 .dma_boundary = PAGE_SIZE-1, 1699 .dma_boundary = PAGE_SIZE-1,
1700 .no_write_same = 1,
1700}; 1701};
1701 1702
1702enum { 1703enum {
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 3ed666fe840a..9025edd7dc45 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -377,7 +377,7 @@ out_master_put:
377 377
378static int bcm2835_spi_remove(struct platform_device *pdev) 378static int bcm2835_spi_remove(struct platform_device *pdev)
379{ 379{
380 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); 380 struct spi_master *master = platform_get_drvdata(pdev);
381 struct bcm2835_spi *bs = spi_master_get_devdata(master); 381 struct bcm2835_spi *bs = spi_master_get_devdata(master);
382 382
383 free_irq(bs->irq, master); 383 free_irq(bs->irq, master);
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 80d56b214eb5..469ecd876358 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -435,7 +435,7 @@ out:
435 435
436static int bcm63xx_spi_remove(struct platform_device *pdev) 436static int bcm63xx_spi_remove(struct platform_device *pdev)
437{ 437{
438 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); 438 struct spi_master *master = platform_get_drvdata(pdev);
439 struct bcm63xx_spi *bs = spi_master_get_devdata(master); 439 struct bcm63xx_spi *bs = spi_master_get_devdata(master);
440 440
441 /* reset spi block */ 441 /* reset spi block */
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index b9f0192758d6..6d207afec8cb 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -150,7 +150,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
150 &dws->tx_sgl, 150 &dws->tx_sgl,
151 1, 151 1,
152 DMA_MEM_TO_DEV, 152 DMA_MEM_TO_DEV,
153 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); 153 DMA_PREP_INTERRUPT);
154 txdesc->callback = dw_spi_dma_done; 154 txdesc->callback = dw_spi_dma_done;
155 txdesc->callback_param = dws; 155 txdesc->callback_param = dws;
156 156
@@ -173,7 +173,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
173 &dws->rx_sgl, 173 &dws->rx_sgl,
174 1, 174 1,
175 DMA_DEV_TO_MEM, 175 DMA_DEV_TO_MEM,
176 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); 176 DMA_PREP_INTERRUPT);
177 rxdesc->callback = dw_spi_dma_done; 177 rxdesc->callback = dw_spi_dma_done;
178 rxdesc->callback_param = dws; 178 rxdesc->callback_param = dws;
179 179
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 9602bbd8d7ea..87676587d783 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -557,7 +557,7 @@ free_master:
557 557
558static int mpc512x_psc_spi_do_remove(struct device *dev) 558static int mpc512x_psc_spi_do_remove(struct device *dev)
559{ 559{
560 struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); 560 struct spi_master *master = dev_get_drvdata(dev);
561 struct mpc512x_psc_spi *mps = spi_master_get_devdata(master); 561 struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
562 562
563 clk_disable_unprepare(mps->clk_mclk); 563 clk_disable_unprepare(mps->clk_mclk);
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 73afb56c08cc..3adebfa22e3d 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -565,7 +565,7 @@ static int mxs_spi_remove(struct platform_device *pdev)
565 struct mxs_spi *spi; 565 struct mxs_spi *spi;
566 struct mxs_ssp *ssp; 566 struct mxs_ssp *ssp;
567 567
568 master = spi_master_get(platform_get_drvdata(pdev)); 568 master = platform_get_drvdata(pdev);
569 spi = spi_master_get_devdata(master); 569 spi = spi_master_get_devdata(master);
570 ssp = &spi->ssp; 570 ssp = &spi->ssp;
571 571
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index cb0e1f1137ad..7765b1999537 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1073,6 +1073,8 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
1073static struct acpi_device_id pxa2xx_spi_acpi_match[] = { 1073static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
1074 { "INT33C0", 0 }, 1074 { "INT33C0", 0 },
1075 { "INT33C1", 0 }, 1075 { "INT33C1", 0 },
1076 { "INT3430", 0 },
1077 { "INT3431", 0 },
1076 { "80860F0E", 0 }, 1078 { "80860F0E", 0 },
1077 { }, 1079 { },
1078}; 1080};
@@ -1291,6 +1293,9 @@ static int pxa2xx_spi_resume(struct device *dev)
1291 /* Enable the SSP clock */ 1293 /* Enable the SSP clock */
1292 clk_prepare_enable(ssp->clk); 1294 clk_prepare_enable(ssp->clk);
1293 1295
1296 /* Restore LPSS private register bits */
1297 lpss_ssp_setup(drv_data);
1298
1294 /* Start the queue running */ 1299 /* Start the queue running */
1295 status = spi_master_resume(drv_data->master); 1300 status = spi_master_resume(drv_data->master);
1296 if (status != 0) { 1301 if (status != 0) {
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 58449ad4ad0d..9e829cee7357 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -885,14 +885,13 @@ static void rspi_release_dma(struct rspi_data *rspi)
885 885
886static int rspi_remove(struct platform_device *pdev) 886static int rspi_remove(struct platform_device *pdev)
887{ 887{
888 struct rspi_data *rspi = spi_master_get(platform_get_drvdata(pdev)); 888 struct rspi_data *rspi = platform_get_drvdata(pdev);
889 889
890 spi_unregister_master(rspi->master); 890 spi_unregister_master(rspi->master);
891 rspi_release_dma(rspi); 891 rspi_release_dma(rspi);
892 free_irq(platform_get_irq(pdev, 0), rspi); 892 free_irq(platform_get_irq(pdev, 0), rspi);
893 clk_put(rspi->clk); 893 clk_put(rspi->clk);
894 iounmap(rspi->addr); 894 iounmap(rspi->addr);
895 spi_master_put(rspi->master);
896 895
897 return 0; 896 return 0;
898} 897}
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 0b71270fbf67..4396bd448540 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -161,7 +161,7 @@ static int ti_qspi_setup(struct spi_device *spi)
161 qspi->spi_max_frequency, clk_div); 161 qspi->spi_max_frequency, clk_div);
162 162
163 ret = pm_runtime_get_sync(qspi->dev); 163 ret = pm_runtime_get_sync(qspi->dev);
164 if (ret) { 164 if (ret < 0) {
165 dev_err(qspi->dev, "pm_runtime_get_sync() failed\n"); 165 dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
166 return ret; 166 return ret;
167 } 167 }
@@ -459,11 +459,10 @@ static int ti_qspi_probe(struct platform_device *pdev)
459 if (!of_property_read_u32(np, "num-cs", &num_cs)) 459 if (!of_property_read_u32(np, "num-cs", &num_cs))
460 master->num_chipselect = num_cs; 460 master->num_chipselect = num_cs;
461 461
462 platform_set_drvdata(pdev, master);
463
464 qspi = spi_master_get_devdata(master); 462 qspi = spi_master_get_devdata(master);
465 qspi->master = master; 463 qspi->master = master;
466 qspi->dev = &pdev->dev; 464 qspi->dev = &pdev->dev;
465 platform_set_drvdata(pdev, qspi);
467 466
468 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 467 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
469 468
@@ -517,10 +516,26 @@ free_master:
517 516
518static int ti_qspi_remove(struct platform_device *pdev) 517static int ti_qspi_remove(struct platform_device *pdev)
519{ 518{
520 struct ti_qspi *qspi = platform_get_drvdata(pdev); 519 struct spi_master *master;
520 struct ti_qspi *qspi;
521 int ret;
522
523 master = platform_get_drvdata(pdev);
524 qspi = spi_master_get_devdata(master);
525
526 ret = pm_runtime_get_sync(qspi->dev);
527 if (ret < 0) {
528 dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
529 return ret;
530 }
521 531
522 ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG); 532 ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG);
523 533
534 pm_runtime_put(qspi->dev);
535 pm_runtime_disable(&pdev->dev);
536
537 spi_unregister_master(master);
538
524 return 0; 539 return 0;
525} 540}
526 541
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index 637cce2b8bdd..18c9bb2b5f39 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -425,7 +425,7 @@ exit:
425 425
426static int txx9spi_remove(struct platform_device *dev) 426static int txx9spi_remove(struct platform_device *dev)
427{ 427{
428 struct spi_master *master = spi_master_get(platform_get_drvdata(dev)); 428 struct spi_master *master = platform_get_drvdata(dev);
429 struct txx9spi *c = spi_master_get_devdata(master); 429 struct txx9spi *c = spi_master_get_devdata(master);
430 430
431 destroy_workqueue(c->workqueue); 431 destroy_workqueue(c->workqueue);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8d85ddc46011..349ebba4b199 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -357,6 +357,19 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
357} 357}
358EXPORT_SYMBOL_GPL(spi_alloc_device); 358EXPORT_SYMBOL_GPL(spi_alloc_device);
359 359
360static void spi_dev_set_name(struct spi_device *spi)
361{
362 struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
363
364 if (adev) {
365 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
366 return;
367 }
368
369 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
370 spi->chip_select);
371}
372
360/** 373/**
361 * spi_add_device - Add spi_device allocated with spi_alloc_device 374 * spi_add_device - Add spi_device allocated with spi_alloc_device
362 * @spi: spi_device to register 375 * @spi: spi_device to register
@@ -383,9 +396,7 @@ int spi_add_device(struct spi_device *spi)
383 } 396 }
384 397
385 /* Set the bus ID string */ 398 /* Set the bus ID string */
386 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev), 399 spi_dev_set_name(spi);
387 spi->chip_select);
388
389 400
390 /* We need to make sure there's no other device with this 401 /* We need to make sure there's no other device with this
391 * chipselect **BEFORE** we call setup(), else we'll trash 402 * chipselect **BEFORE** we call setup(), else we'll trash
@@ -1144,7 +1155,7 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
1144 return AE_NO_MEMORY; 1155 return AE_NO_MEMORY;
1145 } 1156 }
1146 1157
1147 ACPI_HANDLE_SET(&spi->dev, handle); 1158 ACPI_COMPANION_SET(&spi->dev, adev);
1148 spi->irq = -1; 1159 spi->irq = -1;
1149 1160
1150 INIT_LIST_HEAD(&resource_list); 1161 INIT_LIST_HEAD(&resource_list);
@@ -1404,7 +1415,7 @@ int devm_spi_register_master(struct device *dev, struct spi_master *master)
1404 return -ENOMEM; 1415 return -ENOMEM;
1405 1416
1406 ret = spi_register_master(master); 1417 ret = spi_register_master(master);
1407 if (ret != 0) { 1418 if (!ret) {
1408 *ptr = master; 1419 *ptr = master;
1409 devres_add(dev, ptr); 1420 devres_add(dev, ptr);
1410 } else { 1421 } else {
diff --git a/drivers/staging/btmtk_usb/btmtk_usb.c b/drivers/staging/btmtk_usb/btmtk_usb.c
index 7a9bf3b57810..9a5ebd6cc512 100644
--- a/drivers/staging/btmtk_usb/btmtk_usb.c
+++ b/drivers/staging/btmtk_usb/btmtk_usb.c
@@ -1284,9 +1284,8 @@ done:
1284 kfree_skb(skb); 1284 kfree_skb(skb);
1285} 1285}
1286 1286
1287static int btmtk_usb_send_frame(struct sk_buff *skb) 1287static int btmtk_usb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1288{ 1288{
1289 struct hci_dev *hdev = (struct hci_dev *)skb->dev;
1290 struct btmtk_usb_data *data = hci_get_drvdata(hdev); 1289 struct btmtk_usb_data *data = hci_get_drvdata(hdev);
1291 struct usb_ctrlrequest *dr; 1290 struct usb_ctrlrequest *dr;
1292 struct urb *urb; 1291 struct urb *urb;
diff --git a/drivers/staging/comedi/drivers/pcl730.c b/drivers/staging/comedi/drivers/pcl730.c
index d041b714db29..2baaf1db6fbf 100644
--- a/drivers/staging/comedi/drivers/pcl730.c
+++ b/drivers/staging/comedi/drivers/pcl730.c
@@ -173,11 +173,11 @@ static int pcl730_do_insn_bits(struct comedi_device *dev,
173 if (mask) { 173 if (mask) {
174 if (mask & 0x00ff) 174 if (mask & 0x00ff)
175 outb(s->state & 0xff, dev->iobase + reg); 175 outb(s->state & 0xff, dev->iobase + reg);
176 if ((mask & 0xff00) & (s->n_chan > 8)) 176 if ((mask & 0xff00) && (s->n_chan > 8))
177 outb((s->state >> 8) & 0xff, dev->iobase + reg + 1); 177 outb((s->state >> 8) & 0xff, dev->iobase + reg + 1);
178 if ((mask & 0xff0000) & (s->n_chan > 16)) 178 if ((mask & 0xff0000) && (s->n_chan > 16))
179 outb((s->state >> 16) & 0xff, dev->iobase + reg + 2); 179 outb((s->state >> 16) & 0xff, dev->iobase + reg + 2);
180 if ((mask & 0xff000000) & (s->n_chan > 24)) 180 if ((mask & 0xff000000) && (s->n_chan > 24))
181 outb((s->state >> 24) & 0xff, dev->iobase + reg + 3); 181 outb((s->state >> 24) & 0xff, dev->iobase + reg + 3);
182 } 182 }
183 183
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 6815cfe2664e..b486099b543d 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -494,7 +494,7 @@ static void s626_send_dac(struct comedi_device *dev, uint32_t val)
494 * Private helper function: Write setpoint to an application DAC channel. 494 * Private helper function: Write setpoint to an application DAC channel.
495 */ 495 */
496static void s626_set_dac(struct comedi_device *dev, uint16_t chan, 496static void s626_set_dac(struct comedi_device *dev, uint16_t chan,
497 unsigned short dacdata) 497 int16_t dacdata)
498{ 498{
499 struct s626_private *devpriv = dev->private; 499 struct s626_private *devpriv = dev->private;
500 uint16_t signmask; 500 uint16_t signmask;
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index 933b01a0f03d..0adf3cffddb0 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -465,7 +465,7 @@ static int vmk80xx_do_insn_bits(struct comedi_device *dev,
465 unsigned char *rx_buf = devpriv->usb_rx_buf; 465 unsigned char *rx_buf = devpriv->usb_rx_buf;
466 unsigned char *tx_buf = devpriv->usb_tx_buf; 466 unsigned char *tx_buf = devpriv->usb_tx_buf;
467 int reg, cmd; 467 int reg, cmd;
468 int ret; 468 int ret = 0;
469 469
470 if (devpriv->model == VMK8061_MODEL) { 470 if (devpriv->model == VMK8061_MODEL) {
471 reg = VMK8061_DO_REG; 471 reg = VMK8061_DO_REG;
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
index 68ded17c0f5c..12f333fa59b5 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
@@ -578,7 +578,7 @@ static int request_code_segment(struct ft1000_usb *ft1000dev, u16 **s_file,
578 u8 **c_file, const u8 *endpoint, bool boot_case) 578 u8 **c_file, const u8 *endpoint, bool boot_case)
579{ 579{
580 long word_length; 580 long word_length;
581 int status; 581 int status = 0;
582 582
583 /*DEBUG("FT1000:REQUEST_CODE_SEGMENT\n");i*/ 583 /*DEBUG("FT1000:REQUEST_CODE_SEGMENT\n");i*/
584 word_length = get_request_value(ft1000dev); 584 word_length = get_request_value(ft1000dev);
@@ -1074,4 +1074,3 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
1074 1074
1075 return status; 1075 return status;
1076} 1076}
1077
diff --git a/drivers/staging/iio/magnetometer/Kconfig b/drivers/staging/iio/magnetometer/Kconfig
index a3ea69e9d800..34634da1f9f7 100644
--- a/drivers/staging/iio/magnetometer/Kconfig
+++ b/drivers/staging/iio/magnetometer/Kconfig
@@ -6,6 +6,8 @@ menu "Magnetometer sensors"
6config SENSORS_HMC5843 6config SENSORS_HMC5843
7 tristate "Honeywell HMC5843/5883/5883L 3-Axis Magnetometer" 7 tristate "Honeywell HMC5843/5883/5883L 3-Axis Magnetometer"
8 depends on I2C 8 depends on I2C
9 select IIO_BUFFER
10 select IIO_TRIGGERED_BUFFER
9 help 11 help
10 Say Y here to add support for the Honeywell HMC5843, HMC5883 and 12 Say Y here to add support for the Honeywell HMC5843, HMC5883 and
11 HMC5883L 3-Axis Magnetometer (digital compass). 13 HMC5883L 3-Axis Magnetometer (digital compass).
diff --git a/drivers/staging/imx-drm/Makefile b/drivers/staging/imx-drm/Makefile
index 2c3a9e178fb5..8742432d7b01 100644
--- a/drivers/staging/imx-drm/Makefile
+++ b/drivers/staging/imx-drm/Makefile
@@ -8,4 +8,6 @@ obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
8obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o 8obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
9obj-$(CONFIG_DRM_IMX_FB_HELPER) += imx-fbdev.o 9obj-$(CONFIG_DRM_IMX_FB_HELPER) += imx-fbdev.o
10obj-$(CONFIG_DRM_IMX_IPUV3_CORE) += ipu-v3/ 10obj-$(CONFIG_DRM_IMX_IPUV3_CORE) += ipu-v3/
11obj-$(CONFIG_DRM_IMX_IPUV3) += ipuv3-crtc.o ipuv3-plane.o 11
12imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o
13obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 51aa9772f959..6bd015ac9d68 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -72,6 +72,7 @@ int imx_drm_crtc_id(struct imx_drm_crtc *crtc)
72{ 72{
73 return crtc->pipe; 73 return crtc->pipe;
74} 74}
75EXPORT_SYMBOL_GPL(imx_drm_crtc_id);
75 76
76static void imx_drm_driver_lastclose(struct drm_device *drm) 77static void imx_drm_driver_lastclose(struct drm_device *drm)
77{ 78{
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
index 5dec771d70ee..4d340f4a2198 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -409,8 +409,8 @@ int ptlrpc_stop_pinger(void)
409 struct l_wait_info lwi = { 0 }; 409 struct l_wait_info lwi = { 0 };
410 int rc = 0; 410 int rc = 0;
411 411
412 if (!thread_is_init(&pinger_thread) && 412 if (thread_is_init(&pinger_thread) ||
413 !thread_is_stopped(&pinger_thread)) 413 thread_is_stopped(&pinger_thread))
414 return -EALREADY; 414 return -EALREADY;
415 415
416 ptlrpc_pinger_remove_timeouts(); 416 ptlrpc_pinger_remove_timeouts();
diff --git a/drivers/staging/media/go7007/go7007-usb.c b/drivers/staging/media/go7007/go7007-usb.c
index 58684da45e6c..b658c2316df3 100644
--- a/drivers/staging/media/go7007/go7007-usb.c
+++ b/drivers/staging/media/go7007/go7007-usb.c
@@ -15,6 +15,8 @@
15 * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 15 * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
16 */ 16 */
17 17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
18#include <linux/module.h> 20#include <linux/module.h>
19#include <linux/kernel.h> 21#include <linux/kernel.h>
20#include <linux/init.h> 22#include <linux/init.h>
@@ -661,7 +663,7 @@ static int go7007_usb_interface_reset(struct go7007 *go)
661 663
662 if (usb->board->flags & GO7007_USB_EZUSB) { 664 if (usb->board->flags & GO7007_USB_EZUSB) {
663 /* Reset buffer in EZ-USB */ 665 /* Reset buffer in EZ-USB */
664 dev_dbg(go->dev, "resetting EZ-USB buffers\n"); 666 pr_debug("resetting EZ-USB buffers\n");
665 if (go7007_usb_vendor_request(go, 0x10, 0, 0, NULL, 0, 0) < 0 || 667 if (go7007_usb_vendor_request(go, 0x10, 0, 0, NULL, 0, 0) < 0 ||
666 go7007_usb_vendor_request(go, 0x10, 0, 0, NULL, 0, 0) < 0) 668 go7007_usb_vendor_request(go, 0x10, 0, 0, NULL, 0, 0) < 0)
667 return -1; 669 return -1;
@@ -689,7 +691,7 @@ static int go7007_usb_ezusb_write_interrupt(struct go7007 *go,
689 u16 status_reg = 0; 691 u16 status_reg = 0;
690 int timeout = 500; 692 int timeout = 500;
691 693
692 dev_dbg(go->dev, "WriteInterrupt: %04x %04x\n", addr, data); 694 pr_debug("WriteInterrupt: %04x %04x\n", addr, data);
693 695
694 for (i = 0; i < 100; ++i) { 696 for (i = 0; i < 100; ++i) {
695 r = usb_control_msg(usb->usbdev, 697 r = usb_control_msg(usb->usbdev,
@@ -734,7 +736,7 @@ static int go7007_usb_onboard_write_interrupt(struct go7007 *go,
734 int r; 736 int r;
735 int timeout = 500; 737 int timeout = 500;
736 738
737 dev_dbg(go->dev, "WriteInterrupt: %04x %04x\n", addr, data); 739 pr_debug("WriteInterrupt: %04x %04x\n", addr, data);
738 740
739 go->usb_buf[0] = data & 0xff; 741 go->usb_buf[0] = data & 0xff;
740 go->usb_buf[1] = data >> 8; 742 go->usb_buf[1] = data >> 8;
@@ -771,7 +773,7 @@ static void go7007_usb_readinterrupt_complete(struct urb *urb)
771 go->interrupt_available = 1; 773 go->interrupt_available = 1;
772 go->interrupt_data = __le16_to_cpu(regs[0]); 774 go->interrupt_data = __le16_to_cpu(regs[0]);
773 go->interrupt_value = __le16_to_cpu(regs[1]); 775 go->interrupt_value = __le16_to_cpu(regs[1]);
774 dev_dbg(go->dev, "ReadInterrupt: %04x %04x\n", 776 pr_debug("ReadInterrupt: %04x %04x\n",
775 go->interrupt_value, go->interrupt_data); 777 go->interrupt_value, go->interrupt_data);
776 } 778 }
777 779
@@ -891,7 +893,7 @@ static int go7007_usb_send_firmware(struct go7007 *go, u8 *data, int len)
891 int transferred, pipe; 893 int transferred, pipe;
892 int timeout = 500; 894 int timeout = 500;
893 895
894 dev_dbg(go->dev, "DownloadBuffer sending %d bytes\n", len); 896 pr_debug("DownloadBuffer sending %d bytes\n", len);
895 897
896 if (usb->board->flags & GO7007_USB_EZUSB) 898 if (usb->board->flags & GO7007_USB_EZUSB)
897 pipe = usb_sndbulkpipe(usb->usbdev, 2); 899 pipe = usb_sndbulkpipe(usb->usbdev, 2);
@@ -977,7 +979,7 @@ static int go7007_usb_i2c_master_xfer(struct i2c_adapter *adapter,
977 !(msgs[i].flags & I2C_M_RD) && 979 !(msgs[i].flags & I2C_M_RD) &&
978 (msgs[i + 1].flags & I2C_M_RD)) { 980 (msgs[i + 1].flags & I2C_M_RD)) {
979#ifdef GO7007_I2C_DEBUG 981#ifdef GO7007_I2C_DEBUG
980 dev_dbg(go->dev, "i2c write/read %d/%d bytes on %02x\n", 982 pr_debug("i2c write/read %d/%d bytes on %02x\n",
981 msgs[i].len, msgs[i + 1].len, msgs[i].addr); 983 msgs[i].len, msgs[i + 1].len, msgs[i].addr);
982#endif 984#endif
983 buf[0] = 0x01; 985 buf[0] = 0x01;
@@ -988,7 +990,7 @@ static int go7007_usb_i2c_master_xfer(struct i2c_adapter *adapter,
988 buf[buf_len++] = msgs[++i].len; 990 buf[buf_len++] = msgs[++i].len;
989 } else if (msgs[i].flags & I2C_M_RD) { 991 } else if (msgs[i].flags & I2C_M_RD) {
990#ifdef GO7007_I2C_DEBUG 992#ifdef GO7007_I2C_DEBUG
991 dev_dbg(go->dev, "i2c read %d bytes on %02x\n", 993 pr_debug("i2c read %d bytes on %02x\n",
992 msgs[i].len, msgs[i].addr); 994 msgs[i].len, msgs[i].addr);
993#endif 995#endif
994 buf[0] = 0x01; 996 buf[0] = 0x01;
@@ -998,7 +1000,7 @@ static int go7007_usb_i2c_master_xfer(struct i2c_adapter *adapter,
998 buf_len = 4; 1000 buf_len = 4;
999 } else { 1001 } else {
1000#ifdef GO7007_I2C_DEBUG 1002#ifdef GO7007_I2C_DEBUG
1001 dev_dbg(go->dev, "i2c write %d bytes on %02x\n", 1003 pr_debug("i2c write %d bytes on %02x\n",
1002 msgs[i].len, msgs[i].addr); 1004 msgs[i].len, msgs[i].addr);
1003#endif 1005#endif
1004 buf[0] = 0x00; 1006 buf[0] = 0x00;
@@ -1057,7 +1059,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
1057 char *name; 1059 char *name;
1058 int video_pipe, i, v_urb_len; 1060 int video_pipe, i, v_urb_len;
1059 1061
1060 dev_dbg(go->dev, "probing new GO7007 USB board\n"); 1062 pr_debug("probing new GO7007 USB board\n");
1061 1063
1062 switch (id->driver_info) { 1064 switch (id->driver_info) {
1063 case GO7007_BOARDID_MATRIX_II: 1065 case GO7007_BOARDID_MATRIX_II:
@@ -1097,13 +1099,13 @@ static int go7007_usb_probe(struct usb_interface *intf,
1097 board = &board_px_tv402u; 1099 board = &board_px_tv402u;
1098 break; 1100 break;
1099 case GO7007_BOARDID_LIFEVIEW_LR192: 1101 case GO7007_BOARDID_LIFEVIEW_LR192:
1100 dev_err(go->dev, "The Lifeview TV Walker Ultra is not supported. Sorry!\n"); 1102 dev_err(&intf->dev, "The Lifeview TV Walker Ultra is not supported. Sorry!\n");
1101 return -ENODEV; 1103 return -ENODEV;
1102 name = "Lifeview TV Walker Ultra"; 1104 name = "Lifeview TV Walker Ultra";
1103 board = &board_lifeview_lr192; 1105 board = &board_lifeview_lr192;
1104 break; 1106 break;
1105 case GO7007_BOARDID_SENSORAY_2250: 1107 case GO7007_BOARDID_SENSORAY_2250:
1106 dev_info(go->dev, "Sensoray 2250 found\n"); 1108 dev_info(&intf->dev, "Sensoray 2250 found\n");
1107 name = "Sensoray 2250/2251"; 1109 name = "Sensoray 2250/2251";
1108 board = &board_sensoray_2250; 1110 board = &board_sensoray_2250;
1109 break; 1111 break;
@@ -1112,7 +1114,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
1112 board = &board_ads_usbav_709; 1114 board = &board_ads_usbav_709;
1113 break; 1115 break;
1114 default: 1116 default:
1115 dev_err(go->dev, "unknown board ID %d!\n", 1117 dev_err(&intf->dev, "unknown board ID %d!\n",
1116 (unsigned int)id->driver_info); 1118 (unsigned int)id->driver_info);
1117 return -ENODEV; 1119 return -ENODEV;
1118 } 1120 }
@@ -1247,7 +1249,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
1247 sizeof(go->name)); 1249 sizeof(go->name));
1248 break; 1250 break;
1249 default: 1251 default:
1250 dev_dbg(go->dev, "unable to detect tuner type!\n"); 1252 pr_debug("unable to detect tuner type!\n");
1251 break; 1253 break;
1252 } 1254 }
1253 /* Configure tuner mode selection inputs connected 1255 /* Configure tuner mode selection inputs connected
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 3066ee2e753b..49ea76b3435d 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -681,7 +681,8 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
681 dev_err(nvec->dev, 681 dev_err(nvec->dev,
682 "RX buffer overflow on %p: " 682 "RX buffer overflow on %p: "
683 "Trying to write byte %u of %u\n", 683 "Trying to write byte %u of %u\n",
684 nvec->rx, nvec->rx->pos, NVEC_MSG_SIZE); 684 nvec->rx, nvec->rx ? nvec->rx->pos : 0,
685 NVEC_MSG_SIZE);
685 break; 686 break;
686 default: 687 default:
687 nvec->state = 0; 688 nvec->state = 0;
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 2c678f409573..2f548ebada59 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -1115,6 +1115,9 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
1115 return _FAIL; 1115 return _FAIL;
1116 } 1116 }
1117 1117
1118 /* fix bug of flush_cam_entry at STOP AP mode */
1119 psta->state |= WIFI_AP_STATE;
1120 rtw_indicate_connect(padapter);
1118 pmlmepriv->cur_network.join_res = true;/* for check if already set beacon */ 1121 pmlmepriv->cur_network.join_res = true;/* for check if already set beacon */
1119 return ret; 1122 return ret;
1120} 1123}
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index 165b918b8171..1b6d581c438b 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -4,7 +4,7 @@
4 4
5menuconfig TIDSPBRIDGE 5menuconfig TIDSPBRIDGE
6 tristate "DSP Bridge driver" 6 tristate "DSP Bridge driver"
7 depends on ARCH_OMAP3 && !ARCH_MULTIPLATFORM 7 depends on ARCH_OMAP3 && !ARCH_MULTIPLATFORM && BROKEN
8 select MAILBOX 8 select MAILBOX
9 select OMAP2PLUS_MBOX 9 select OMAP2PLUS_MBOX
10 help 10 help
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 1aa4a3fd0f1b..56e355b3e7fa 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -258,7 +258,8 @@ err:
258/* This function maps kernel space memory to user space memory. */ 258/* This function maps kernel space memory to user space memory. */
259static int bridge_mmap(struct file *filp, struct vm_area_struct *vma) 259static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
260{ 260{
261 u32 status; 261 struct omap_dsp_platform_data *pdata =
262 omap_dspbridge_dev->dev.platform_data;
262 263
263 /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */ 264 /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
264 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 265 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -268,13 +269,9 @@ static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
268 vma->vm_start, vma->vm_end, vma->vm_page_prot, 269 vma->vm_start, vma->vm_end, vma->vm_page_prot,
269 vma->vm_flags); 270 vma->vm_flags);
270 271
271 status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 272 return vm_iomap_memory(vma,
272 vma->vm_end - vma->vm_start, 273 pdata->phys_mempool_base,
273 vma->vm_page_prot); 274 pdata->phys_mempool_size);
274 if (status != 0)
275 status = -EAGAIN;
276
277 return status;
278} 275}
279 276
280static const struct file_operations bridge_fops = { 277static const struct file_operations bridge_fops = {
diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
index aab0012bba92..ab8b2ba6eedd 100644
--- a/drivers/staging/vt6655/hostap.c
+++ b/drivers/staging/vt6655/hostap.c
@@ -143,7 +143,8 @@ static int hostap_disable_hostapd(PSDevice pDevice, int rtnl_locked)
143 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n", 143 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
144 pDevice->dev->name, pDevice->apdev->name); 144 pDevice->dev->name, pDevice->apdev->name);
145 } 145 }
146 free_netdev(pDevice->apdev); 146 if (pDevice->apdev)
147 free_netdev(pDevice->apdev);
147 pDevice->apdev = NULL; 148 pDevice->apdev = NULL;
148 pDevice->bEnable8021x = false; 149 pDevice->bEnable8021x = false;
149 pDevice->bEnableHostWEP = false; 150 pDevice->bEnableHostWEP = false;
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 1e8b8412e67e..4aa5ef54b683 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -939,6 +939,7 @@ int BBbVT3184Init(struct vnt_private *pDevice)
939 u8 * pbyAgc; 939 u8 * pbyAgc;
940 u16 wLengthAgc; 940 u16 wLengthAgc;
941 u8 abyArray[256]; 941 u8 abyArray[256];
942 u8 data;
942 943
943 ntStatus = CONTROLnsRequestIn(pDevice, 944 ntStatus = CONTROLnsRequestIn(pDevice,
944 MESSAGE_TYPE_READ, 945 MESSAGE_TYPE_READ,
@@ -1104,6 +1105,16 @@ else {
1104 ControlvWriteByte(pDevice,MESSAGE_REQUEST_BBREG,0x0D,0x01); 1105 ControlvWriteByte(pDevice,MESSAGE_REQUEST_BBREG,0x0D,0x01);
1105 1106
1106 RFbRFTableDownload(pDevice); 1107 RFbRFTableDownload(pDevice);
1108
1109 /* Fix for TX USB resets from vendors driver */
1110 CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_READ, USB_REG4,
1111 MESSAGE_REQUEST_MEM, sizeof(data), &data);
1112
1113 data |= 0x2;
1114
1115 CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE, USB_REG4,
1116 MESSAGE_REQUEST_MEM, sizeof(data), &data);
1117
1107 return true;//ntStatus; 1118 return true;//ntStatus;
1108} 1119}
1109 1120
diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
index ae1676d190c5..67ba48b9a8d9 100644
--- a/drivers/staging/vt6656/hostap.c
+++ b/drivers/staging/vt6656/hostap.c
@@ -133,7 +133,8 @@ static int hostap_disable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
133 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n", 133 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
134 pDevice->dev->name, pDevice->apdev->name); 134 pDevice->dev->name, pDevice->apdev->name);
135 } 135 }
136 free_netdev(pDevice->apdev); 136 if (pDevice->apdev)
137 free_netdev(pDevice->apdev);
137 pDevice->apdev = NULL; 138 pDevice->apdev = NULL;
138 pDevice->bEnable8021x = false; 139 pDevice->bEnable8021x = false;
139 pDevice->bEnableHostWEP = false; 140 pDevice->bEnableHostWEP = false;
diff --git a/drivers/staging/vt6656/rndis.h b/drivers/staging/vt6656/rndis.h
index 5e073062017a..5cf5e732a36f 100644
--- a/drivers/staging/vt6656/rndis.h
+++ b/drivers/staging/vt6656/rndis.h
@@ -66,6 +66,8 @@
66 66
67#define VIAUSB20_PACKET_HEADER 0x04 67#define VIAUSB20_PACKET_HEADER 0x04
68 68
69#define USB_REG4 0x604
70
69typedef struct _CMD_MESSAGE 71typedef struct _CMD_MESSAGE
70{ 72{
71 u8 byData[256]; 73 u8 byData[256];
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 79ce363b2ea9..3277d9838f4e 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -652,21 +652,30 @@ static ssize_t reset_store(struct device *dev,
652 return -ENOMEM; 652 return -ENOMEM;
653 653
654 /* Do not reset an active device! */ 654 /* Do not reset an active device! */
655 if (bdev->bd_holders) 655 if (bdev->bd_holders) {
656 return -EBUSY; 656 ret = -EBUSY;
657 goto out;
658 }
657 659
658 ret = kstrtou16(buf, 10, &do_reset); 660 ret = kstrtou16(buf, 10, &do_reset);
659 if (ret) 661 if (ret)
660 return ret; 662 goto out;
661 663
662 if (!do_reset) 664 if (!do_reset) {
663 return -EINVAL; 665 ret = -EINVAL;
666 goto out;
667 }
664 668
665 /* Make sure all pending I/O is finished */ 669 /* Make sure all pending I/O is finished */
666 fsync_bdev(bdev); 670 fsync_bdev(bdev);
671 bdput(bdev);
667 672
668 zram_reset_device(zram, true); 673 zram_reset_device(zram, true);
669 return len; 674 return len;
675
676out:
677 bdput(bdev);
678 return ret;
670} 679}
671 680
672static void __zram_make_request(struct zram *zram, struct bio *bio, int rw) 681static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
index 1a67537dbc56..3b950e5a918f 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -430,7 +430,12 @@ static struct page *get_next_page(struct page *page)
430 return next; 430 return next;
431} 431}
432 432
433/* Encode <page, obj_idx> as a single handle value */ 433/*
434 * Encode <page, obj_idx> as a single handle value.
435 * On hardware platforms with physical memory starting at 0x0 the pfn
436 * could be 0 so we ensure that the handle will never be 0 by adjusting the
437 * encoded obj_idx value before encoding.
438 */
434static void *obj_location_to_handle(struct page *page, unsigned long obj_idx) 439static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
435{ 440{
436 unsigned long handle; 441 unsigned long handle;
@@ -441,17 +446,21 @@ static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
441 } 446 }
442 447
443 handle = page_to_pfn(page) << OBJ_INDEX_BITS; 448 handle = page_to_pfn(page) << OBJ_INDEX_BITS;
444 handle |= (obj_idx & OBJ_INDEX_MASK); 449 handle |= ((obj_idx + 1) & OBJ_INDEX_MASK);
445 450
446 return (void *)handle; 451 return (void *)handle;
447} 452}
448 453
449/* Decode <page, obj_idx> pair from the given object handle */ 454/*
455 * Decode <page, obj_idx> pair from the given object handle. We adjust the
456 * decoded obj_idx back to its original value since it was adjusted in
457 * obj_location_to_handle().
458 */
450static void obj_handle_to_location(unsigned long handle, struct page **page, 459static void obj_handle_to_location(unsigned long handle, struct page **page,
451 unsigned long *obj_idx) 460 unsigned long *obj_idx)
452{ 461{
453 *page = pfn_to_page(handle >> OBJ_INDEX_BITS); 462 *page = pfn_to_page(handle >> OBJ_INDEX_BITS);
454 *obj_idx = handle & OBJ_INDEX_MASK; 463 *obj_idx = (handle & OBJ_INDEX_MASK) - 1;
455} 464}
456 465
457static unsigned long obj_idx_to_offset(struct page *page, 466static unsigned long obj_idx_to_offset(struct page *page,
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 38e44b9abf0f..d70e9119e906 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -805,14 +805,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
805 int iscsi_task_attr; 805 int iscsi_task_attr;
806 int sam_task_attr; 806 int sam_task_attr;
807 807
808 spin_lock_bh(&conn->sess->session_stats_lock); 808 atomic_long_inc(&conn->sess->cmd_pdus);
809 conn->sess->cmd_pdus++;
810 if (conn->sess->se_sess->se_node_acl) {
811 spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
812 conn->sess->se_sess->se_node_acl->num_cmds++;
813 spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
814 }
815 spin_unlock_bh(&conn->sess->session_stats_lock);
816 809
817 hdr = (struct iscsi_scsi_req *) buf; 810 hdr = (struct iscsi_scsi_req *) buf;
818 payload_length = ntoh24(hdr->dlength); 811 payload_length = ntoh24(hdr->dlength);
@@ -1254,20 +1247,12 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
1254 int rc; 1247 int rc;
1255 1248
1256 if (!payload_length) { 1249 if (!payload_length) {
1257 pr_err("DataOUT payload is ZERO, protocol error.\n"); 1250 pr_warn("DataOUT payload is ZERO, ignoring.\n");
1258 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, 1251 return 0;
1259 buf);
1260 } 1252 }
1261 1253
1262 /* iSCSI write */ 1254 /* iSCSI write */
1263 spin_lock_bh(&conn->sess->session_stats_lock); 1255 atomic_long_add(payload_length, &conn->sess->rx_data_octets);
1264 conn->sess->rx_data_octets += payload_length;
1265 if (conn->sess->se_sess->se_node_acl) {
1266 spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
1267 conn->sess->se_sess->se_node_acl->write_bytes += payload_length;
1268 spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
1269 }
1270 spin_unlock_bh(&conn->sess->session_stats_lock);
1271 1256
1272 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { 1257 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1273 pr_err("DataSegmentLength: %u is greater than" 1258 pr_err("DataSegmentLength: %u is greater than"
@@ -1486,7 +1471,7 @@ EXPORT_SYMBOL(iscsit_check_dataout_payload);
1486 1471
1487static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) 1472static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
1488{ 1473{
1489 struct iscsi_cmd *cmd; 1474 struct iscsi_cmd *cmd = NULL;
1490 struct iscsi_data *hdr = (struct iscsi_data *)buf; 1475 struct iscsi_data *hdr = (struct iscsi_data *)buf;
1491 int rc; 1476 int rc;
1492 bool data_crc_failed = false; 1477 bool data_crc_failed = false;
@@ -1954,6 +1939,13 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1954 (unsigned char *)hdr); 1939 (unsigned char *)hdr);
1955 } 1940 }
1956 1941
1942 if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) ||
1943 (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) {
1944 pr_err("Multi sequence text commands currently not supported\n");
1945 return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED,
1946 (unsigned char *)hdr);
1947 }
1948
1957 pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x," 1949 pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
1958 " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn, 1950 " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
1959 hdr->exp_statsn, payload_length); 1951 hdr->exp_statsn, payload_length);
@@ -2630,14 +2622,7 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2630 return -1; 2622 return -1;
2631 } 2623 }
2632 2624
2633 spin_lock_bh(&conn->sess->session_stats_lock); 2625 atomic_long_add(datain.length, &conn->sess->tx_data_octets);
2634 conn->sess->tx_data_octets += datain.length;
2635 if (conn->sess->se_sess->se_node_acl) {
2636 spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
2637 conn->sess->se_sess->se_node_acl->read_bytes += datain.length;
2638 spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
2639 }
2640 spin_unlock_bh(&conn->sess->session_stats_lock);
2641 /* 2626 /*
2642 * Special case for successfully execution w/ both DATAIN 2627 * Special case for successfully execution w/ both DATAIN
2643 * and Sense Data. 2628 * and Sense Data.
@@ -3162,9 +3147,7 @@ void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3162 if (inc_stat_sn) 3147 if (inc_stat_sn)
3163 cmd->stat_sn = conn->stat_sn++; 3148 cmd->stat_sn = conn->stat_sn++;
3164 3149
3165 spin_lock_bh(&conn->sess->session_stats_lock); 3150 atomic_long_inc(&conn->sess->rsp_pdus);
3166 conn->sess->rsp_pdus++;
3167 spin_unlock_bh(&conn->sess->session_stats_lock);
3168 3151
3169 memset(hdr, 0, ISCSI_HDR_LEN); 3152 memset(hdr, 0, ISCSI_HDR_LEN);
3170 hdr->opcode = ISCSI_OP_SCSI_CMD_RSP; 3153 hdr->opcode = ISCSI_OP_SCSI_CMD_RSP;
@@ -3374,6 +3357,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
3374 struct iscsi_tiqn *tiqn; 3357 struct iscsi_tiqn *tiqn;
3375 struct iscsi_tpg_np *tpg_np; 3358 struct iscsi_tpg_np *tpg_np;
3376 int buffer_len, end_of_buf = 0, len = 0, payload_len = 0; 3359 int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
3360 int target_name_printed;
3377 unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */ 3361 unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
3378 unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL; 3362 unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
3379 3363
@@ -3411,19 +3395,23 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
3411 continue; 3395 continue;
3412 } 3396 }
3413 3397
3414 len = sprintf(buf, "TargetName=%s", tiqn->tiqn); 3398 target_name_printed = 0;
3415 len += 1;
3416
3417 if ((len + payload_len) > buffer_len) {
3418 end_of_buf = 1;
3419 goto eob;
3420 }
3421 memcpy(payload + payload_len, buf, len);
3422 payload_len += len;
3423 3399
3424 spin_lock(&tiqn->tiqn_tpg_lock); 3400 spin_lock(&tiqn->tiqn_tpg_lock);
3425 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { 3401 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
3426 3402
3403 /* If demo_mode_discovery=0 and generate_node_acls=0
3404 * (demo mode dislabed) do not return
3405 * TargetName+TargetAddress unless a NodeACL exists.
3406 */
3407
3408 if ((tpg->tpg_attrib.generate_node_acls == 0) &&
3409 (tpg->tpg_attrib.demo_mode_discovery == 0) &&
3410 (!core_tpg_get_initiator_node_acl(&tpg->tpg_se_tpg,
3411 cmd->conn->sess->sess_ops->InitiatorName))) {
3412 continue;
3413 }
3414
3427 spin_lock(&tpg->tpg_state_lock); 3415 spin_lock(&tpg->tpg_state_lock);
3428 if ((tpg->tpg_state == TPG_STATE_FREE) || 3416 if ((tpg->tpg_state == TPG_STATE_FREE) ||
3429 (tpg->tpg_state == TPG_STATE_INACTIVE)) { 3417 (tpg->tpg_state == TPG_STATE_INACTIVE)) {
@@ -3438,6 +3426,22 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
3438 struct iscsi_np *np = tpg_np->tpg_np; 3426 struct iscsi_np *np = tpg_np->tpg_np;
3439 bool inaddr_any = iscsit_check_inaddr_any(np); 3427 bool inaddr_any = iscsit_check_inaddr_any(np);
3440 3428
3429 if (!target_name_printed) {
3430 len = sprintf(buf, "TargetName=%s",
3431 tiqn->tiqn);
3432 len += 1;
3433
3434 if ((len + payload_len) > buffer_len) {
3435 spin_unlock(&tpg->tpg_np_lock);
3436 spin_unlock(&tiqn->tiqn_tpg_lock);
3437 end_of_buf = 1;
3438 goto eob;
3439 }
3440 memcpy(payload + payload_len, buf, len);
3441 payload_len += len;
3442 target_name_printed = 1;
3443 }
3444
3441 len = sprintf(buf, "TargetAddress=" 3445 len = sprintf(buf, "TargetAddress="
3442 "%s:%hu,%hu", 3446 "%s:%hu,%hu",
3443 (inaddr_any == false) ? 3447 (inaddr_any == false) ?
@@ -4092,9 +4096,7 @@ restart:
4092 * hit default in the switch below. 4096 * hit default in the switch below.
4093 */ 4097 */
4094 memset(buffer, 0xff, ISCSI_HDR_LEN); 4098 memset(buffer, 0xff, ISCSI_HDR_LEN);
4095 spin_lock_bh(&conn->sess->session_stats_lock); 4099 atomic_long_inc(&conn->sess->conn_digest_errors);
4096 conn->sess->conn_digest_errors++;
4097 spin_unlock_bh(&conn->sess->session_stats_lock);
4098 } else { 4100 } else {
4099 pr_debug("Got HeaderDigest CRC32C" 4101 pr_debug("Got HeaderDigest CRC32C"
4100 " 0x%08x\n", checksum); 4102 " 0x%08x\n", checksum);
@@ -4381,7 +4383,7 @@ int iscsit_close_connection(
4381 4383
4382int iscsit_close_session(struct iscsi_session *sess) 4384int iscsit_close_session(struct iscsi_session *sess)
4383{ 4385{
4384 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); 4386 struct iscsi_portal_group *tpg = sess->tpg;
4385 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 4387 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4386 4388
4387 if (atomic_read(&sess->nconn)) { 4389 if (atomic_read(&sess->nconn)) {
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 7505fddca15f..de77d9aa22c6 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -111,7 +111,7 @@ static struct iscsi_chap *chap_server_open(
111 /* 111 /*
112 * Set Identifier. 112 * Set Identifier.
113 */ 113 */
114 chap->id = ISCSI_TPG_C(conn)->tpg_chap_id++; 114 chap->id = conn->tpg->tpg_chap_id++;
115 *aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id); 115 *aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id);
116 *aic_len += 1; 116 *aic_len += 1;
117 pr_debug("[server] Sending CHAP_I=%d\n", chap->id); 117 pr_debug("[server] Sending CHAP_I=%d\n", chap->id);
@@ -146,6 +146,7 @@ static int chap_server_compute_md5(
146 unsigned char client_digest[MD5_SIGNATURE_SIZE]; 146 unsigned char client_digest[MD5_SIGNATURE_SIZE];
147 unsigned char server_digest[MD5_SIGNATURE_SIZE]; 147 unsigned char server_digest[MD5_SIGNATURE_SIZE];
148 unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH]; 148 unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
149 size_t compare_len;
149 struct iscsi_chap *chap = conn->auth_protocol; 150 struct iscsi_chap *chap = conn->auth_protocol;
150 struct crypto_hash *tfm; 151 struct crypto_hash *tfm;
151 struct hash_desc desc; 152 struct hash_desc desc;
@@ -184,7 +185,9 @@ static int chap_server_compute_md5(
184 goto out; 185 goto out;
185 } 186 }
186 187
187 if (memcmp(chap_n, auth->userid, strlen(auth->userid)) != 0) { 188 /* Include the terminating NULL in the compare */
189 compare_len = strlen(auth->userid) + 1;
190 if (strncmp(chap_n, auth->userid, compare_len) != 0) {
188 pr_err("CHAP_N values do not match!\n"); 191 pr_err("CHAP_N values do not match!\n");
189 goto out; 192 goto out;
190 } 193 }
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index fd145259361d..e3318edb233d 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -372,7 +372,7 @@ static ssize_t iscsi_nacl_attrib_show_##name( \
372 struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \ 372 struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
373 se_node_acl); \ 373 se_node_acl); \
374 \ 374 \
375 return sprintf(page, "%u\n", ISCSI_NODE_ATTRIB(nacl)->name); \ 375 return sprintf(page, "%u\n", nacl->node_attrib.name); \
376} \ 376} \
377 \ 377 \
378static ssize_t iscsi_nacl_attrib_store_##name( \ 378static ssize_t iscsi_nacl_attrib_store_##name( \
@@ -897,7 +897,7 @@ static struct se_node_acl *lio_target_make_nodeacl(
897 if (!se_nacl_new) 897 if (!se_nacl_new)
898 return ERR_PTR(-ENOMEM); 898 return ERR_PTR(-ENOMEM);
899 899
900 cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth; 900 cmdsn_depth = tpg->tpg_attrib.default_cmdsn_depth;
901 /* 901 /*
902 * se_nacl_new may be released by core_tpg_add_initiator_node_acl() 902 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
903 * when converting a NdoeACL from demo mode -> explict 903 * when converting a NdoeACL from demo mode -> explict
@@ -920,9 +920,9 @@ static struct se_node_acl *lio_target_make_nodeacl(
920 return ERR_PTR(-ENOMEM); 920 return ERR_PTR(-ENOMEM);
921 } 921 }
922 922
923 stats_cg->default_groups[0] = &NODE_STAT_GRPS(acl)->iscsi_sess_stats_group; 923 stats_cg->default_groups[0] = &acl->node_stat_grps.iscsi_sess_stats_group;
924 stats_cg->default_groups[1] = NULL; 924 stats_cg->default_groups[1] = NULL;
925 config_group_init_type_name(&NODE_STAT_GRPS(acl)->iscsi_sess_stats_group, 925 config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group,
926 "iscsi_sess_stats", &iscsi_stat_sess_cit); 926 "iscsi_sess_stats", &iscsi_stat_sess_cit);
927 927
928 return se_nacl; 928 return se_nacl;
@@ -967,7 +967,7 @@ static ssize_t iscsi_tpg_attrib_show_##name( \
967 if (iscsit_get_tpg(tpg) < 0) \ 967 if (iscsit_get_tpg(tpg) < 0) \
968 return -EINVAL; \ 968 return -EINVAL; \
969 \ 969 \
970 rb = sprintf(page, "%u\n", ISCSI_TPG_ATTRIB(tpg)->name); \ 970 rb = sprintf(page, "%u\n", tpg->tpg_attrib.name); \
971 iscsit_put_tpg(tpg); \ 971 iscsit_put_tpg(tpg); \
972 return rb; \ 972 return rb; \
973} \ 973} \
@@ -1041,6 +1041,16 @@ TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
1041 */ 1041 */
1042DEF_TPG_ATTRIB(prod_mode_write_protect); 1042DEF_TPG_ATTRIB(prod_mode_write_protect);
1043TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR); 1043TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
1044/*
1045 * Define iscsi_tpg_attrib_s_demo_mode_discovery,
1046 */
1047DEF_TPG_ATTRIB(demo_mode_discovery);
1048TPG_ATTR(demo_mode_discovery, S_IRUGO | S_IWUSR);
1049/*
1050 * Define iscsi_tpg_attrib_s_default_erl
1051 */
1052DEF_TPG_ATTRIB(default_erl);
1053TPG_ATTR(default_erl, S_IRUGO | S_IWUSR);
1044 1054
1045static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { 1055static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
1046 &iscsi_tpg_attrib_authentication.attr, 1056 &iscsi_tpg_attrib_authentication.attr,
@@ -1051,6 +1061,8 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
1051 &iscsi_tpg_attrib_cache_dynamic_acls.attr, 1061 &iscsi_tpg_attrib_cache_dynamic_acls.attr,
1052 &iscsi_tpg_attrib_demo_mode_write_protect.attr, 1062 &iscsi_tpg_attrib_demo_mode_write_protect.attr,
1053 &iscsi_tpg_attrib_prod_mode_write_protect.attr, 1063 &iscsi_tpg_attrib_prod_mode_write_protect.attr,
1064 &iscsi_tpg_attrib_demo_mode_discovery.attr,
1065 &iscsi_tpg_attrib_default_erl.attr,
1054 NULL, 1066 NULL,
1055}; 1067};
1056 1068
@@ -1514,21 +1526,21 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
1514 return ERR_PTR(-ENOMEM); 1526 return ERR_PTR(-ENOMEM);
1515 } 1527 }
1516 1528
1517 stats_cg->default_groups[0] = &WWN_STAT_GRPS(tiqn)->iscsi_instance_group; 1529 stats_cg->default_groups[0] = &tiqn->tiqn_stat_grps.iscsi_instance_group;
1518 stats_cg->default_groups[1] = &WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group; 1530 stats_cg->default_groups[1] = &tiqn->tiqn_stat_grps.iscsi_sess_err_group;
1519 stats_cg->default_groups[2] = &WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group; 1531 stats_cg->default_groups[2] = &tiqn->tiqn_stat_grps.iscsi_tgt_attr_group;
1520 stats_cg->default_groups[3] = &WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group; 1532 stats_cg->default_groups[3] = &tiqn->tiqn_stat_grps.iscsi_login_stats_group;
1521 stats_cg->default_groups[4] = &WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group; 1533 stats_cg->default_groups[4] = &tiqn->tiqn_stat_grps.iscsi_logout_stats_group;
1522 stats_cg->default_groups[5] = NULL; 1534 stats_cg->default_groups[5] = NULL;
1523 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_instance_group, 1535 config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group,
1524 "iscsi_instance", &iscsi_stat_instance_cit); 1536 "iscsi_instance", &iscsi_stat_instance_cit);
1525 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group, 1537 config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_sess_err_group,
1526 "iscsi_sess_err", &iscsi_stat_sess_err_cit); 1538 "iscsi_sess_err", &iscsi_stat_sess_err_cit);
1527 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group, 1539 config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_tgt_attr_group,
1528 "iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit); 1540 "iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit);
1529 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group, 1541 config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_login_stats_group,
1530 "iscsi_login_stats", &iscsi_stat_login_cit); 1542 "iscsi_login_stats", &iscsi_stat_login_cit);
1531 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group, 1543 config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group,
1532 "iscsi_logout_stats", &iscsi_stat_logout_cit); 1544 "iscsi_logout_stats", &iscsi_stat_logout_cit);
1533 1545
1534 pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn); 1546 pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
@@ -1784,6 +1796,11 @@ static int lio_queue_status(struct se_cmd *se_cmd)
1784 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1796 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1785 1797
1786 cmd->i_state = ISTATE_SEND_STATUS; 1798 cmd->i_state = ISTATE_SEND_STATUS;
1799
1800 if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
1801 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
1802 return 0;
1803 }
1787 cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd); 1804 cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd);
1788 1805
1789 return 0; 1806 return 0;
@@ -1815,21 +1832,21 @@ static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
1815{ 1832{
1816 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; 1833 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1817 1834
1818 return ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth; 1835 return tpg->tpg_attrib.default_cmdsn_depth;
1819} 1836}
1820 1837
1821static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg) 1838static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
1822{ 1839{
1823 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; 1840 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1824 1841
1825 return ISCSI_TPG_ATTRIB(tpg)->generate_node_acls; 1842 return tpg->tpg_attrib.generate_node_acls;
1826} 1843}
1827 1844
1828static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg) 1845static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
1829{ 1846{
1830 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; 1847 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1831 1848
1832 return ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls; 1849 return tpg->tpg_attrib.cache_dynamic_acls;
1833} 1850}
1834 1851
1835static int lio_tpg_check_demo_mode_write_protect( 1852static int lio_tpg_check_demo_mode_write_protect(
@@ -1837,7 +1854,7 @@ static int lio_tpg_check_demo_mode_write_protect(
1837{ 1854{
1838 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; 1855 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1839 1856
1840 return ISCSI_TPG_ATTRIB(tpg)->demo_mode_write_protect; 1857 return tpg->tpg_attrib.demo_mode_write_protect;
1841} 1858}
1842 1859
1843static int lio_tpg_check_prod_mode_write_protect( 1860static int lio_tpg_check_prod_mode_write_protect(
@@ -1845,7 +1862,7 @@ static int lio_tpg_check_prod_mode_write_protect(
1845{ 1862{
1846 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; 1863 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1847 1864
1848 return ISCSI_TPG_ATTRIB(tpg)->prod_mode_write_protect; 1865 return tpg->tpg_attrib.prod_mode_write_protect;
1849} 1866}
1850 1867
1851static void lio_tpg_release_fabric_acl( 1868static void lio_tpg_release_fabric_acl(
@@ -1908,9 +1925,12 @@ static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
1908{ 1925{
1909 struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl, 1926 struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl,
1910 se_node_acl); 1927 se_node_acl);
1928 struct se_portal_group *se_tpg = se_acl->se_tpg;
1929 struct iscsi_portal_group *tpg = container_of(se_tpg,
1930 struct iscsi_portal_group, tpg_se_tpg);
1911 1931
1912 ISCSI_NODE_ATTRIB(acl)->nacl = acl; 1932 acl->node_attrib.nacl = acl;
1913 iscsit_set_default_node_attribues(acl); 1933 iscsit_set_default_node_attribues(acl, tpg);
1914} 1934}
1915 1935
1916static int lio_check_stop_free(struct se_cmd *se_cmd) 1936static int lio_check_stop_free(struct se_cmd *se_cmd)
@@ -1995,17 +2015,17 @@ int iscsi_target_register_configfs(void)
1995 * Setup default attribute lists for various fabric->tf_cit_tmpl 2015 * Setup default attribute lists for various fabric->tf_cit_tmpl
1996 * sturct config_item_type's 2016 * sturct config_item_type's
1997 */ 2017 */
1998 TF_CIT_TMPL(fabric)->tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs; 2018 fabric->tf_cit_tmpl.tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
1999 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs; 2019 fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
2000 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs; 2020 fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
2001 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs; 2021 fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
2002 TF_CIT_TMPL(fabric)->tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs; 2022 fabric->tf_cit_tmpl.tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs;
2003 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs; 2023 fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
2004 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs; 2024 fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
2005 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs; 2025 fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
2006 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs; 2026 fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
2007 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs; 2027 fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
2008 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs; 2028 fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
2009 2029
2010 ret = target_fabric_configfs_register(fabric); 2030 ret = target_fabric_configfs_register(fabric);
2011 if (ret < 0) { 2031 if (ret < 0) {
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 9a5721b8ff96..48f7b3bf4e8c 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -37,9 +37,6 @@
37#define NA_RANDOM_DATAIN_PDU_OFFSETS 0 37#define NA_RANDOM_DATAIN_PDU_OFFSETS 0
38#define NA_RANDOM_DATAIN_SEQ_OFFSETS 0 38#define NA_RANDOM_DATAIN_SEQ_OFFSETS 0
39#define NA_RANDOM_R2T_OFFSETS 0 39#define NA_RANDOM_R2T_OFFSETS 0
40#define NA_DEFAULT_ERL 0
41#define NA_DEFAULT_ERL_MAX 2
42#define NA_DEFAULT_ERL_MIN 0
43 40
44/* struct iscsi_tpg_attrib sanity values */ 41/* struct iscsi_tpg_attrib sanity values */
45#define TA_AUTHENTICATION 1 42#define TA_AUTHENTICATION 1
@@ -58,6 +55,8 @@
58#define TA_DEMO_MODE_WRITE_PROTECT 1 55#define TA_DEMO_MODE_WRITE_PROTECT 1
59/* Disabled by default in production mode w/ explict ACLs */ 56/* Disabled by default in production mode w/ explict ACLs */
60#define TA_PROD_MODE_WRITE_PROTECT 0 57#define TA_PROD_MODE_WRITE_PROTECT 0
58#define TA_DEMO_MODE_DISCOVERY 1
59#define TA_DEFAULT_ERL 0
61#define TA_CACHE_CORE_NPS 0 60#define TA_CACHE_CORE_NPS 0
62 61
63 62
@@ -192,6 +191,7 @@ enum recover_cmdsn_ret_table {
192 CMDSN_NORMAL_OPERATION = 0, 191 CMDSN_NORMAL_OPERATION = 0,
193 CMDSN_LOWER_THAN_EXP = 1, 192 CMDSN_LOWER_THAN_EXP = 1,
194 CMDSN_HIGHER_THAN_EXP = 2, 193 CMDSN_HIGHER_THAN_EXP = 2,
194 CMDSN_MAXCMDSN_OVERRUN = 3,
195}; 195};
196 196
197/* Used for iscsi_handle_immediate_data() return values */ 197/* Used for iscsi_handle_immediate_data() return values */
@@ -650,14 +650,13 @@ struct iscsi_session {
650 /* Used for session reference counting */ 650 /* Used for session reference counting */
651 int session_usage_count; 651 int session_usage_count;
652 int session_waiting_on_uc; 652 int session_waiting_on_uc;
653 u32 cmd_pdus; 653 atomic_long_t cmd_pdus;
654 u32 rsp_pdus; 654 atomic_long_t rsp_pdus;
655 u64 tx_data_octets; 655 atomic_long_t tx_data_octets;
656 u64 rx_data_octets; 656 atomic_long_t rx_data_octets;
657 u32 conn_digest_errors; 657 atomic_long_t conn_digest_errors;
658 u32 conn_timeout_errors; 658 atomic_long_t conn_timeout_errors;
659 u64 creation_time; 659 u64 creation_time;
660 spinlock_t session_stats_lock;
661 /* Number of active connections */ 660 /* Number of active connections */
662 atomic_t nconn; 661 atomic_t nconn;
663 atomic_t session_continuation; 662 atomic_t session_continuation;
@@ -755,11 +754,6 @@ struct iscsi_node_acl {
755 struct se_node_acl se_node_acl; 754 struct se_node_acl se_node_acl;
756}; 755};
757 756
758#define NODE_STAT_GRPS(nacl) (&(nacl)->node_stat_grps)
759
760#define ISCSI_NODE_ATTRIB(t) (&(t)->node_attrib)
761#define ISCSI_NODE_AUTH(t) (&(t)->node_auth)
762
763struct iscsi_tpg_attrib { 757struct iscsi_tpg_attrib {
764 u32 authentication; 758 u32 authentication;
765 u32 login_timeout; 759 u32 login_timeout;
@@ -769,6 +763,8 @@ struct iscsi_tpg_attrib {
769 u32 default_cmdsn_depth; 763 u32 default_cmdsn_depth;
770 u32 demo_mode_write_protect; 764 u32 demo_mode_write_protect;
771 u32 prod_mode_write_protect; 765 u32 prod_mode_write_protect;
766 u32 demo_mode_discovery;
767 u32 default_erl;
772 struct iscsi_portal_group *tpg; 768 struct iscsi_portal_group *tpg;
773}; 769};
774 770
@@ -835,12 +831,6 @@ struct iscsi_portal_group {
835 struct list_head tpg_list; 831 struct list_head tpg_list;
836} ____cacheline_aligned; 832} ____cacheline_aligned;
837 833
838#define ISCSI_TPG_C(c) ((struct iscsi_portal_group *)(c)->tpg)
839#define ISCSI_TPG_LUN(c, l) ((iscsi_tpg_list_t *)(c)->tpg->tpg_lun_list_t[l])
840#define ISCSI_TPG_S(s) ((struct iscsi_portal_group *)(s)->tpg)
841#define ISCSI_TPG_ATTRIB(t) (&(t)->tpg_attrib)
842#define SE_TPG(tpg) (&(tpg)->tpg_se_tpg)
843
844struct iscsi_wwn_stat_grps { 834struct iscsi_wwn_stat_grps {
845 struct config_group iscsi_stat_group; 835 struct config_group iscsi_stat_group;
846 struct config_group iscsi_instance_group; 836 struct config_group iscsi_instance_group;
@@ -871,8 +861,6 @@ struct iscsi_tiqn {
871 struct iscsi_logout_stats logout_stats; 861 struct iscsi_logout_stats logout_stats;
872} ____cacheline_aligned; 862} ____cacheline_aligned;
873 863
874#define WWN_STAT_GRPS(tiqn) (&(tiqn)->tiqn_stat_grps)
875
876struct iscsit_global { 864struct iscsit_global {
877 /* In core shutdown */ 865 /* In core shutdown */
878 u32 in_shutdown; 866 u32 in_shutdown;
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index 6c7a5104a4cd..7087c736daa5 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -58,11 +58,7 @@ void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess
58 58
59 cmd->maxcmdsn_inc = 1; 59 cmd->maxcmdsn_inc = 1;
60 60
61 if (!mutex_trylock(&sess->cmdsn_mutex)) { 61 mutex_lock(&sess->cmdsn_mutex);
62 sess->max_cmd_sn += 1;
63 pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
64 return;
65 }
66 sess->max_cmd_sn += 1; 62 sess->max_cmd_sn += 1;
67 pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn); 63 pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
68 mutex_unlock(&sess->cmdsn_mutex); 64 mutex_unlock(&sess->cmdsn_mutex);
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 41052e512d92..0d1e6ee3e992 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -757,7 +757,7 @@ int iscsit_check_post_dataout(
757static void iscsit_handle_time2retain_timeout(unsigned long data) 757static void iscsit_handle_time2retain_timeout(unsigned long data)
758{ 758{
759 struct iscsi_session *sess = (struct iscsi_session *) data; 759 struct iscsi_session *sess = (struct iscsi_session *) data;
760 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); 760 struct iscsi_portal_group *tpg = sess->tpg;
761 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 761 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
762 762
763 spin_lock_bh(&se_tpg->session_lock); 763 spin_lock_bh(&se_tpg->session_lock);
@@ -785,7 +785,7 @@ static void iscsit_handle_time2retain_timeout(unsigned long data)
785 tiqn->sess_err_stats.last_sess_failure_type = 785 tiqn->sess_err_stats.last_sess_failure_type =
786 ISCSI_SESS_ERR_CXN_TIMEOUT; 786 ISCSI_SESS_ERR_CXN_TIMEOUT;
787 tiqn->sess_err_stats.cxn_timeout_errors++; 787 tiqn->sess_err_stats.cxn_timeout_errors++;
788 sess->conn_timeout_errors++; 788 atomic_long_inc(&sess->conn_timeout_errors);
789 spin_unlock(&tiqn->sess_err_stats.lock); 789 spin_unlock(&tiqn->sess_err_stats.lock);
790 } 790 }
791 } 791 }
@@ -801,9 +801,9 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess)
801 * Only start Time2Retain timer when the associated TPG is still in 801 * Only start Time2Retain timer when the associated TPG is still in
802 * an ACTIVE (eg: not disabled or shutdown) state. 802 * an ACTIVE (eg: not disabled or shutdown) state.
803 */ 803 */
804 spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock); 804 spin_lock(&sess->tpg->tpg_state_lock);
805 tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE); 805 tpg_active = (sess->tpg->tpg_state == TPG_STATE_ACTIVE);
806 spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock); 806 spin_unlock(&sess->tpg->tpg_state_lock);
807 807
808 if (!tpg_active) 808 if (!tpg_active)
809 return; 809 return;
@@ -829,7 +829,7 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess)
829 */ 829 */
830int iscsit_stop_time2retain_timer(struct iscsi_session *sess) 830int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
831{ 831{
832 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); 832 struct iscsi_portal_group *tpg = sess->tpg;
833 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 833 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
834 834
835 if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED) 835 if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 1794c753954a..4eb93b2b6473 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -305,7 +305,6 @@ static int iscsi_login_zero_tsih_s1(
305 } 305 }
306 306
307 sess->creation_time = get_jiffies_64(); 307 sess->creation_time = get_jiffies_64();
308 spin_lock_init(&sess->session_stats_lock);
309 /* 308 /*
310 * The FFP CmdSN window values will be allocated from the TPG's 309 * The FFP CmdSN window values will be allocated from the TPG's
311 * Initiator Node's ACL once the login has been successfully completed. 310 * Initiator Node's ACL once the login has been successfully completed.
@@ -347,15 +346,15 @@ static int iscsi_login_zero_tsih_s2(
347 * Assign a new TPG Session Handle. Note this is protected with 346 * Assign a new TPG Session Handle. Note this is protected with
348 * struct iscsi_portal_group->np_login_sem from iscsit_access_np(). 347 * struct iscsi_portal_group->np_login_sem from iscsit_access_np().
349 */ 348 */
350 sess->tsih = ++ISCSI_TPG_S(sess)->ntsih; 349 sess->tsih = ++sess->tpg->ntsih;
351 if (!sess->tsih) 350 if (!sess->tsih)
352 sess->tsih = ++ISCSI_TPG_S(sess)->ntsih; 351 sess->tsih = ++sess->tpg->ntsih;
353 352
354 /* 353 /*
355 * Create the default params from user defined values.. 354 * Create the default params from user defined values..
356 */ 355 */
357 if (iscsi_copy_param_list(&conn->param_list, 356 if (iscsi_copy_param_list(&conn->param_list,
358 ISCSI_TPG_C(conn)->param_list, 1) < 0) { 357 conn->tpg->param_list, 1) < 0) {
359 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 358 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
360 ISCSI_LOGIN_STATUS_NO_RESOURCES); 359 ISCSI_LOGIN_STATUS_NO_RESOURCES);
361 return -1; 360 return -1;
@@ -380,7 +379,7 @@ static int iscsi_login_zero_tsih_s2(
380 * In our case, we have already located the struct iscsi_tiqn at this point. 379 * In our case, we have already located the struct iscsi_tiqn at this point.
381 */ 380 */
382 memset(buf, 0, 32); 381 memset(buf, 0, 32);
383 sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt); 382 sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt);
384 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { 383 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
385 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 384 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
386 ISCSI_LOGIN_STATUS_NO_RESOURCES); 385 ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -575,7 +574,7 @@ static int iscsi_login_non_zero_tsih_s2(
575 iscsi_login_set_conn_values(sess, conn, pdu->cid); 574 iscsi_login_set_conn_values(sess, conn, pdu->cid);
576 575
577 if (iscsi_copy_param_list(&conn->param_list, 576 if (iscsi_copy_param_list(&conn->param_list,
578 ISCSI_TPG_C(conn)->param_list, 0) < 0) { 577 conn->tpg->param_list, 0) < 0) {
579 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 578 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
580 ISCSI_LOGIN_STATUS_NO_RESOURCES); 579 ISCSI_LOGIN_STATUS_NO_RESOURCES);
581 return -1; 580 return -1;
@@ -593,7 +592,7 @@ static int iscsi_login_non_zero_tsih_s2(
593 * In our case, we have already located the struct iscsi_tiqn at this point. 592 * In our case, we have already located the struct iscsi_tiqn at this point.
594 */ 593 */
595 memset(buf, 0, 32); 594 memset(buf, 0, 32);
596 sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt); 595 sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt);
597 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { 596 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
598 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 597 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
599 ISCSI_LOGIN_STATUS_NO_RESOURCES); 598 ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -691,7 +690,7 @@ int iscsi_post_login_handler(
691 int stop_timer = 0; 690 int stop_timer = 0;
692 struct iscsi_session *sess = conn->sess; 691 struct iscsi_session *sess = conn->sess;
693 struct se_session *se_sess = sess->se_sess; 692 struct se_session *se_sess = sess->se_sess;
694 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); 693 struct iscsi_portal_group *tpg = sess->tpg;
695 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 694 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
696 struct iscsi_thread_set *ts; 695 struct iscsi_thread_set *ts;
697 696
@@ -1154,7 +1153,7 @@ old_sess_out:
1154 spin_lock_bh(&conn->sess->conn_lock); 1153 spin_lock_bh(&conn->sess->conn_lock);
1155 if (conn->sess->session_state == TARG_SESS_STATE_FAILED) { 1154 if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
1156 struct se_portal_group *se_tpg = 1155 struct se_portal_group *se_tpg =
1157 &ISCSI_TPG_C(conn)->tpg_se_tpg; 1156 &conn->tpg->tpg_se_tpg;
1158 1157
1159 atomic_set(&conn->sess->session_continuation, 0); 1158 atomic_set(&conn->sess->session_continuation, 0);
1160 spin_unlock_bh(&conn->sess->conn_lock); 1159 spin_unlock_bh(&conn->sess->conn_lock);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index ef6d836a4d09..83c965c65386 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -88,7 +88,7 @@ int extract_param(
88 if (len < 0) 88 if (len < 0)
89 return -1; 89 return -1;
90 90
91 if (len > max_length) { 91 if (len >= max_length) {
92 pr_err("Length of input: %d exceeds max_length:" 92 pr_err("Length of input: %d exceeds max_length:"
93 " %d\n", len, max_length); 93 " %d\n", len, max_length);
94 return -1; 94 return -1;
@@ -140,7 +140,7 @@ static u32 iscsi_handle_authentication(
140 iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl, 140 iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
141 se_node_acl); 141 se_node_acl);
142 142
143 auth = ISCSI_NODE_AUTH(iscsi_nacl); 143 auth = &iscsi_nacl->node_auth;
144 } 144 }
145 } else { 145 } else {
146 /* 146 /*
@@ -789,7 +789,7 @@ static int iscsi_target_handle_csg_zero(
789 return -1; 789 return -1;
790 790
791 if (!iscsi_check_negotiated_keys(conn->param_list)) { 791 if (!iscsi_check_negotiated_keys(conn->param_list)) {
792 if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication && 792 if (conn->tpg->tpg_attrib.authentication &&
793 !strncmp(param->value, NONE, 4)) { 793 !strncmp(param->value, NONE, 4)) {
794 pr_err("Initiator sent AuthMethod=None but" 794 pr_err("Initiator sent AuthMethod=None but"
795 " Target is enforcing iSCSI Authentication," 795 " Target is enforcing iSCSI Authentication,"
@@ -799,7 +799,7 @@ static int iscsi_target_handle_csg_zero(
799 return -1; 799 return -1;
800 } 800 }
801 801
802 if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication && 802 if (conn->tpg->tpg_attrib.authentication &&
803 !login->auth_complete) 803 !login->auth_complete)
804 return 0; 804 return 0;
805 805
@@ -862,7 +862,7 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
862 } 862 }
863 863
864 if (!login->auth_complete && 864 if (!login->auth_complete &&
865 ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) { 865 conn->tpg->tpg_attrib.authentication) {
866 pr_err("Initiator is requesting CSG: 1, has not been" 866 pr_err("Initiator is requesting CSG: 1, has not been"
867 " successfully authenticated, and the Target is" 867 " successfully authenticated, and the Target is"
868 " enforcing iSCSI Authentication, login failed.\n"); 868 " enforcing iSCSI Authentication, login failed.\n");
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index 93bdc475eb00..16454a922e2b 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -33,7 +33,8 @@ static inline char *iscsit_na_get_initiatorname(
33} 33}
34 34
35void iscsit_set_default_node_attribues( 35void iscsit_set_default_node_attribues(
36 struct iscsi_node_acl *acl) 36 struct iscsi_node_acl *acl,
37 struct iscsi_portal_group *tpg)
37{ 38{
38 struct iscsi_node_attrib *a = &acl->node_attrib; 39 struct iscsi_node_attrib *a = &acl->node_attrib;
39 40
@@ -44,7 +45,7 @@ void iscsit_set_default_node_attribues(
44 a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS; 45 a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS;
45 a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS; 46 a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS;
46 a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS; 47 a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS;
47 a->default_erl = NA_DEFAULT_ERL; 48 a->default_erl = tpg->tpg_attrib.default_erl;
48} 49}
49 50
50int iscsit_na_dataout_timeout( 51int iscsit_na_dataout_timeout(
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h
index c970b326ef23..0c69a46a62ec 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.h
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h
@@ -1,7 +1,8 @@
1#ifndef ISCSI_TARGET_NODEATTRIB_H 1#ifndef ISCSI_TARGET_NODEATTRIB_H
2#define ISCSI_TARGET_NODEATTRIB_H 2#define ISCSI_TARGET_NODEATTRIB_H
3 3
4extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *); 4extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *,
5 struct iscsi_portal_group *);
5extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32); 6extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
6extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32); 7extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32);
7extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32); 8extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32);
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index f788e8b5e855..103395510307 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -792,7 +792,8 @@ static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
792 if (se_sess) { 792 if (se_sess) {
793 sess = se_sess->fabric_sess_ptr; 793 sess = se_sess->fabric_sess_ptr;
794 if (sess) 794 if (sess)
795 ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus); 795 ret = snprintf(page, PAGE_SIZE, "%lu\n",
796 atomic_long_read(&sess->cmd_pdus));
796 } 797 }
797 spin_unlock_bh(&se_nacl->nacl_sess_lock); 798 spin_unlock_bh(&se_nacl->nacl_sess_lock);
798 799
@@ -815,7 +816,8 @@ static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
815 if (se_sess) { 816 if (se_sess) {
816 sess = se_sess->fabric_sess_ptr; 817 sess = se_sess->fabric_sess_ptr;
817 if (sess) 818 if (sess)
818 ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus); 819 ret = snprintf(page, PAGE_SIZE, "%lu\n",
820 atomic_long_read(&sess->rsp_pdus));
819 } 821 }
820 spin_unlock_bh(&se_nacl->nacl_sess_lock); 822 spin_unlock_bh(&se_nacl->nacl_sess_lock);
821 823
@@ -838,8 +840,8 @@ static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
838 if (se_sess) { 840 if (se_sess) {
839 sess = se_sess->fabric_sess_ptr; 841 sess = se_sess->fabric_sess_ptr;
840 if (sess) 842 if (sess)
841 ret = snprintf(page, PAGE_SIZE, "%llu\n", 843 ret = snprintf(page, PAGE_SIZE, "%lu\n",
842 (unsigned long long)sess->tx_data_octets); 844 atomic_long_read(&sess->tx_data_octets));
843 } 845 }
844 spin_unlock_bh(&se_nacl->nacl_sess_lock); 846 spin_unlock_bh(&se_nacl->nacl_sess_lock);
845 847
@@ -862,8 +864,8 @@ static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
862 if (se_sess) { 864 if (se_sess) {
863 sess = se_sess->fabric_sess_ptr; 865 sess = se_sess->fabric_sess_ptr;
864 if (sess) 866 if (sess)
865 ret = snprintf(page, PAGE_SIZE, "%llu\n", 867 ret = snprintf(page, PAGE_SIZE, "%lu\n",
866 (unsigned long long)sess->rx_data_octets); 868 atomic_long_read(&sess->rx_data_octets));
867 } 869 }
868 spin_unlock_bh(&se_nacl->nacl_sess_lock); 870 spin_unlock_bh(&se_nacl->nacl_sess_lock);
869 871
@@ -886,8 +888,8 @@ static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
886 if (se_sess) { 888 if (se_sess) {
887 sess = se_sess->fabric_sess_ptr; 889 sess = se_sess->fabric_sess_ptr;
888 if (sess) 890 if (sess)
889 ret = snprintf(page, PAGE_SIZE, "%u\n", 891 ret = snprintf(page, PAGE_SIZE, "%lu\n",
890 sess->conn_digest_errors); 892 atomic_long_read(&sess->conn_digest_errors));
891 } 893 }
892 spin_unlock_bh(&se_nacl->nacl_sess_lock); 894 spin_unlock_bh(&se_nacl->nacl_sess_lock);
893 895
@@ -910,8 +912,8 @@ static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
910 if (se_sess) { 912 if (se_sess) {
911 sess = se_sess->fabric_sess_ptr; 913 sess = se_sess->fabric_sess_ptr;
912 if (sess) 914 if (sess)
913 ret = snprintf(page, PAGE_SIZE, "%u\n", 915 ret = snprintf(page, PAGE_SIZE, "%lu\n",
914 sess->conn_timeout_errors); 916 atomic_long_read(&sess->conn_timeout_errors));
915 } 917 }
916 spin_unlock_bh(&se_nacl->nacl_sess_lock); 918 spin_unlock_bh(&se_nacl->nacl_sess_lock);
917 919
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 4faeb47fa5e1..39761837608d 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -223,6 +223,8 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
223 a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS; 223 a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;
224 a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT; 224 a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT;
225 a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT; 225 a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
226 a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY;
227 a->default_erl = TA_DEFAULT_ERL;
226} 228}
227 229
228int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg) 230int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -237,7 +239,7 @@ int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_gro
237 if (iscsi_create_default_params(&tpg->param_list) < 0) 239 if (iscsi_create_default_params(&tpg->param_list) < 0)
238 goto err_out; 240 goto err_out;
239 241
240 ISCSI_TPG_ATTRIB(tpg)->tpg = tpg; 242 tpg->tpg_attrib.tpg = tpg;
241 243
242 spin_lock(&tpg->tpg_state_lock); 244 spin_lock(&tpg->tpg_state_lock);
243 tpg->tpg_state = TPG_STATE_INACTIVE; 245 tpg->tpg_state = TPG_STATE_INACTIVE;
@@ -330,7 +332,7 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
330 return -EINVAL; 332 return -EINVAL;
331 } 333 }
332 334
333 if (ISCSI_TPG_ATTRIB(tpg)->authentication) { 335 if (tpg->tpg_attrib.authentication) {
334 if (!strcmp(param->value, NONE)) { 336 if (!strcmp(param->value, NONE)) {
335 ret = iscsi_update_param_value(param, CHAP); 337 ret = iscsi_update_param_value(param, CHAP);
336 if (ret) 338 if (ret)
@@ -820,3 +822,39 @@ int iscsit_ta_prod_mode_write_protect(
820 822
821 return 0; 823 return 0;
822} 824}
825
826int iscsit_ta_demo_mode_discovery(
827 struct iscsi_portal_group *tpg,
828 u32 flag)
829{
830 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
831
832 if ((flag != 0) && (flag != 1)) {
833 pr_err("Illegal value %d\n", flag);
834 return -EINVAL;
835 }
836
837 a->demo_mode_discovery = flag;
838 pr_debug("iSCSI_TPG[%hu] - Demo Mode Discovery bit:"
839 " %s\n", tpg->tpgt, (a->demo_mode_discovery) ?
840 "ON" : "OFF");
841
842 return 0;
843}
844
845int iscsit_ta_default_erl(
846 struct iscsi_portal_group *tpg,
847 u32 default_erl)
848{
849 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
850
851 if ((default_erl != 0) && (default_erl != 1) && (default_erl != 2)) {
852 pr_err("Illegal value for default_erl: %u\n", default_erl);
853 return -EINVAL;
854 }
855
856 a->default_erl = default_erl;
857 pr_debug("iSCSI_TPG[%hu] - DefaultERL: %u\n", tpg->tpgt, a->default_erl);
858
859 return 0;
860}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index b77693e2c209..213c0fc7fdc9 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -37,5 +37,7 @@ extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32);
37extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32); 37extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32);
38extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32); 38extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
39extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32); 39extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
40extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32);
41extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
40 42
41#endif /* ISCSI_TARGET_TPG_H */ 43#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index b0cac0c342e1..0819e688a398 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -242,9 +242,9 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm
242 */ 242 */
243 if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) { 243 if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
244 pr_err("Received CmdSN: 0x%08x is greater than" 244 pr_err("Received CmdSN: 0x%08x is greater than"
245 " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn, 245 " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn,
246 sess->max_cmd_sn); 246 sess->max_cmd_sn);
247 ret = CMDSN_ERROR_CANNOT_RECOVER; 247 ret = CMDSN_MAXCMDSN_OVERRUN;
248 248
249 } else if (cmdsn == sess->exp_cmd_sn) { 249 } else if (cmdsn == sess->exp_cmd_sn) {
250 sess->exp_cmd_sn++; 250 sess->exp_cmd_sn++;
@@ -303,14 +303,16 @@ int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
303 ret = CMDSN_HIGHER_THAN_EXP; 303 ret = CMDSN_HIGHER_THAN_EXP;
304 break; 304 break;
305 case CMDSN_LOWER_THAN_EXP: 305 case CMDSN_LOWER_THAN_EXP:
306 case CMDSN_MAXCMDSN_OVERRUN:
307 default:
306 cmd->i_state = ISTATE_REMOVE; 308 cmd->i_state = ISTATE_REMOVE;
307 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); 309 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
308 ret = cmdsn_ret; 310 /*
309 break; 311 * Existing callers for iscsit_sequence_cmd() will silently
310 default: 312 * ignore commands with CMDSN_LOWER_THAN_EXP, so force this
311 reason = ISCSI_REASON_PROTOCOL_ERROR; 313 * return for CMDSN_MAXCMDSN_OVERRUN as well..
312 reject = true; 314 */
313 ret = cmdsn_ret; 315 ret = CMDSN_LOWER_THAN_EXP;
314 break; 316 break;
315 } 317 }
316 mutex_unlock(&conn->sess->cmdsn_mutex); 318 mutex_unlock(&conn->sess->cmdsn_mutex);
@@ -980,7 +982,7 @@ static void iscsit_handle_nopin_response_timeout(unsigned long data)
980 tiqn->sess_err_stats.last_sess_failure_type = 982 tiqn->sess_err_stats.last_sess_failure_type =
981 ISCSI_SESS_ERR_CXN_TIMEOUT; 983 ISCSI_SESS_ERR_CXN_TIMEOUT;
982 tiqn->sess_err_stats.cxn_timeout_errors++; 984 tiqn->sess_err_stats.cxn_timeout_errors++;
983 conn->sess->conn_timeout_errors++; 985 atomic_long_inc(&conn->sess->conn_timeout_errors);
984 spin_unlock_bh(&tiqn->sess_err_stats.lock); 986 spin_unlock_bh(&tiqn->sess_err_stats.lock);
985 } 987 }
986 } 988 }
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 0f6d69dabca1..1b41e6776152 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -135,6 +135,21 @@ static int tcm_loop_change_queue_depth(
135 return sdev->queue_depth; 135 return sdev->queue_depth;
136} 136}
137 137
138static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag)
139{
140 if (sdev->tagged_supported) {
141 scsi_set_tag_type(sdev, tag);
142
143 if (tag)
144 scsi_activate_tcq(sdev, sdev->queue_depth);
145 else
146 scsi_deactivate_tcq(sdev, sdev->queue_depth);
147 } else
148 tag = 0;
149
150 return tag;
151}
152
138/* 153/*
139 * Locate the SAM Task Attr from struct scsi_cmnd * 154 * Locate the SAM Task Attr from struct scsi_cmnd *
140 */ 155 */
@@ -178,7 +193,10 @@ static void tcm_loop_submission_work(struct work_struct *work)
178 set_host_byte(sc, DID_NO_CONNECT); 193 set_host_byte(sc, DID_NO_CONNECT);
179 goto out_done; 194 goto out_done;
180 } 195 }
181 196 if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
197 set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
198 goto out_done;
199 }
182 tl_nexus = tl_hba->tl_nexus; 200 tl_nexus = tl_hba->tl_nexus;
183 if (!tl_nexus) { 201 if (!tl_nexus) {
184 scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus" 202 scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
@@ -233,6 +251,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
233 } 251 }
234 252
235 tl_cmd->sc = sc; 253 tl_cmd->sc = sc;
254 tl_cmd->sc_cmd_tag = sc->tag;
236 INIT_WORK(&tl_cmd->work, tcm_loop_submission_work); 255 INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
237 queue_work(tcm_loop_workqueue, &tl_cmd->work); 256 queue_work(tcm_loop_workqueue, &tl_cmd->work);
238 return 0; 257 return 0;
@@ -242,41 +261,21 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
242 * Called from SCSI EH process context to issue a LUN_RESET TMR 261 * Called from SCSI EH process context to issue a LUN_RESET TMR
243 * to struct scsi_device 262 * to struct scsi_device
244 */ 263 */
245static int tcm_loop_device_reset(struct scsi_cmnd *sc) 264static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
265 struct tcm_loop_nexus *tl_nexus,
266 int lun, int task, enum tcm_tmreq_table tmr)
246{ 267{
247 struct se_cmd *se_cmd = NULL; 268 struct se_cmd *se_cmd = NULL;
248 struct se_portal_group *se_tpg;
249 struct se_session *se_sess; 269 struct se_session *se_sess;
270 struct se_portal_group *se_tpg;
250 struct tcm_loop_cmd *tl_cmd = NULL; 271 struct tcm_loop_cmd *tl_cmd = NULL;
251 struct tcm_loop_hba *tl_hba;
252 struct tcm_loop_nexus *tl_nexus;
253 struct tcm_loop_tmr *tl_tmr = NULL; 272 struct tcm_loop_tmr *tl_tmr = NULL;
254 struct tcm_loop_tpg *tl_tpg; 273 int ret = TMR_FUNCTION_FAILED, rc;
255 int ret = FAILED, rc;
256 /*
257 * Locate the tcm_loop_hba_t pointer
258 */
259 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
260 /*
261 * Locate the tl_nexus and se_sess pointers
262 */
263 tl_nexus = tl_hba->tl_nexus;
264 if (!tl_nexus) {
265 pr_err("Unable to perform device reset without"
266 " active I_T Nexus\n");
267 return FAILED;
268 }
269 se_sess = tl_nexus->se_sess;
270 /*
271 * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
272 */
273 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
274 se_tpg = &tl_tpg->tl_se_tpg;
275 274
276 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); 275 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
277 if (!tl_cmd) { 276 if (!tl_cmd) {
278 pr_err("Unable to allocate memory for tl_cmd\n"); 277 pr_err("Unable to allocate memory for tl_cmd\n");
279 return FAILED; 278 return ret;
280 } 279 }
281 280
282 tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL); 281 tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
@@ -287,6 +286,8 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
287 init_waitqueue_head(&tl_tmr->tl_tmr_wait); 286 init_waitqueue_head(&tl_tmr->tl_tmr_wait);
288 287
289 se_cmd = &tl_cmd->tl_se_cmd; 288 se_cmd = &tl_cmd->tl_se_cmd;
289 se_tpg = &tl_tpg->tl_se_tpg;
290 se_sess = tl_nexus->se_sess;
290 /* 291 /*
291 * Initialize struct se_cmd descriptor from target_core_mod infrastructure 292 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
292 */ 293 */
@@ -294,17 +295,23 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
294 DMA_NONE, MSG_SIMPLE_TAG, 295 DMA_NONE, MSG_SIMPLE_TAG,
295 &tl_cmd->tl_sense_buf[0]); 296 &tl_cmd->tl_sense_buf[0]);
296 297
297 rc = core_tmr_alloc_req(se_cmd, tl_tmr, TMR_LUN_RESET, GFP_KERNEL); 298 rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
298 if (rc < 0) 299 if (rc < 0)
299 goto release; 300 goto release;
301
302 if (tmr == TMR_ABORT_TASK)
303 se_cmd->se_tmr_req->ref_task_tag = task;
304
300 /* 305 /*
301 * Locate the underlying TCM struct se_lun from sc->device->lun 306 * Locate the underlying TCM struct se_lun
302 */ 307 */
303 if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0) 308 if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
309 ret = TMR_LUN_DOES_NOT_EXIST;
304 goto release; 310 goto release;
311 }
305 /* 312 /*
306 * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp() 313 * Queue the TMR to TCM Core and sleep waiting for
307 * to wake us up. 314 * tcm_loop_queue_tm_rsp() to wake us up.
308 */ 315 */
309 transport_generic_handle_tmr(se_cmd); 316 transport_generic_handle_tmr(se_cmd);
310 wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete)); 317 wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
@@ -312,8 +319,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
312 * The TMR LUN_RESET has completed, check the response status and 319 * The TMR LUN_RESET has completed, check the response status and
313 * then release allocations. 320 * then release allocations.
314 */ 321 */
315 ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ? 322 ret = se_cmd->se_tmr_req->response;
316 SUCCESS : FAILED;
317release: 323release:
318 if (se_cmd) 324 if (se_cmd)
319 transport_generic_free_cmd(se_cmd, 1); 325 transport_generic_free_cmd(se_cmd, 1);
@@ -323,6 +329,94 @@ release:
323 return ret; 329 return ret;
324} 330}
325 331
332static int tcm_loop_abort_task(struct scsi_cmnd *sc)
333{
334 struct tcm_loop_hba *tl_hba;
335 struct tcm_loop_nexus *tl_nexus;
336 struct tcm_loop_tpg *tl_tpg;
337 int ret = FAILED;
338
339 /*
340 * Locate the tcm_loop_hba_t pointer
341 */
342 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
343 /*
344 * Locate the tl_nexus and se_sess pointers
345 */
346 tl_nexus = tl_hba->tl_nexus;
347 if (!tl_nexus) {
348 pr_err("Unable to perform device reset without"
349 " active I_T Nexus\n");
350 return FAILED;
351 }
352
353 /*
354 * Locate the tl_tpg pointer from TargetID in sc->device->id
355 */
356 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
357 ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
358 sc->tag, TMR_ABORT_TASK);
359 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
360}
361
362/*
363 * Called from SCSI EH process context to issue a LUN_RESET TMR
364 * to struct scsi_device
365 */
366static int tcm_loop_device_reset(struct scsi_cmnd *sc)
367{
368 struct tcm_loop_hba *tl_hba;
369 struct tcm_loop_nexus *tl_nexus;
370 struct tcm_loop_tpg *tl_tpg;
371 int ret = FAILED;
372
373 /*
374 * Locate the tcm_loop_hba_t pointer
375 */
376 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
377 /*
378 * Locate the tl_nexus and se_sess pointers
379 */
380 tl_nexus = tl_hba->tl_nexus;
381 if (!tl_nexus) {
382 pr_err("Unable to perform device reset without"
383 " active I_T Nexus\n");
384 return FAILED;
385 }
386 /*
387 * Locate the tl_tpg pointer from TargetID in sc->device->id
388 */
389 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
390 ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
391 0, TMR_LUN_RESET);
392 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
393}
394
395static int tcm_loop_target_reset(struct scsi_cmnd *sc)
396{
397 struct tcm_loop_hba *tl_hba;
398 struct tcm_loop_tpg *tl_tpg;
399
400 /*
401 * Locate the tcm_loop_hba_t pointer
402 */
403 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
404 if (!tl_hba) {
405 pr_err("Unable to perform device reset without"
406 " active I_T Nexus\n");
407 return FAILED;
408 }
409 /*
410 * Locate the tl_tpg pointer from TargetID in sc->device->id
411 */
412 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
413 if (tl_tpg) {
414 tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
415 return SUCCESS;
416 }
417 return FAILED;
418}
419
326static int tcm_loop_slave_alloc(struct scsi_device *sd) 420static int tcm_loop_slave_alloc(struct scsi_device *sd)
327{ 421{
328 set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags); 422 set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
@@ -331,6 +425,15 @@ static int tcm_loop_slave_alloc(struct scsi_device *sd)
331 425
332static int tcm_loop_slave_configure(struct scsi_device *sd) 426static int tcm_loop_slave_configure(struct scsi_device *sd)
333{ 427{
428 if (sd->tagged_supported) {
429 scsi_activate_tcq(sd, sd->queue_depth);
430 scsi_adjust_queue_depth(sd, MSG_SIMPLE_TAG,
431 sd->host->cmd_per_lun);
432 } else {
433 scsi_adjust_queue_depth(sd, 0,
434 sd->host->cmd_per_lun);
435 }
436
334 return 0; 437 return 0;
335} 438}
336 439
@@ -340,7 +443,10 @@ static struct scsi_host_template tcm_loop_driver_template = {
340 .name = "TCM_Loopback", 443 .name = "TCM_Loopback",
341 .queuecommand = tcm_loop_queuecommand, 444 .queuecommand = tcm_loop_queuecommand,
342 .change_queue_depth = tcm_loop_change_queue_depth, 445 .change_queue_depth = tcm_loop_change_queue_depth,
446 .change_queue_type = tcm_loop_change_queue_type,
447 .eh_abort_handler = tcm_loop_abort_task,
343 .eh_device_reset_handler = tcm_loop_device_reset, 448 .eh_device_reset_handler = tcm_loop_device_reset,
449 .eh_target_reset_handler = tcm_loop_target_reset,
344 .can_queue = 1024, 450 .can_queue = 1024,
345 .this_id = -1, 451 .this_id = -1,
346 .sg_tablesize = 256, 452 .sg_tablesize = 256,
@@ -699,7 +805,10 @@ static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
699 805
700static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd) 806static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
701{ 807{
702 return 1; 808 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
809 struct tcm_loop_cmd, tl_se_cmd);
810
811 return tl_cmd->sc_cmd_tag;
703} 812}
704 813
705static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd) 814static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
@@ -932,7 +1041,10 @@ static int tcm_loop_drop_nexus(
932 struct tcm_loop_nexus *tl_nexus; 1041 struct tcm_loop_nexus *tl_nexus;
933 struct tcm_loop_hba *tl_hba = tpg->tl_hba; 1042 struct tcm_loop_hba *tl_hba = tpg->tl_hba;
934 1043
935 tl_nexus = tpg->tl_hba->tl_nexus; 1044 if (!tl_hba)
1045 return -ENODEV;
1046
1047 tl_nexus = tl_hba->tl_nexus;
936 if (!tl_nexus) 1048 if (!tl_nexus)
937 return -ENODEV; 1049 return -ENODEV;
938 1050
@@ -1061,8 +1173,56 @@ check_newline:
1061 1173
1062TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR); 1174TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);
1063 1175
1176static ssize_t tcm_loop_tpg_show_transport_status(
1177 struct se_portal_group *se_tpg,
1178 char *page)
1179{
1180 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1181 struct tcm_loop_tpg, tl_se_tpg);
1182 const char *status = NULL;
1183 ssize_t ret = -EINVAL;
1184
1185 switch (tl_tpg->tl_transport_status) {
1186 case TCM_TRANSPORT_ONLINE:
1187 status = "online";
1188 break;
1189 case TCM_TRANSPORT_OFFLINE:
1190 status = "offline";
1191 break;
1192 default:
1193 break;
1194 }
1195
1196 if (status)
1197 ret = snprintf(page, PAGE_SIZE, "%s\n", status);
1198
1199 return ret;
1200}
1201
1202static ssize_t tcm_loop_tpg_store_transport_status(
1203 struct se_portal_group *se_tpg,
1204 const char *page,
1205 size_t count)
1206{
1207 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1208 struct tcm_loop_tpg, tl_se_tpg);
1209
1210 if (!strncmp(page, "online", 6)) {
1211 tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
1212 return count;
1213 }
1214 if (!strncmp(page, "offline", 7)) {
1215 tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
1216 return count;
1217 }
1218 return -EINVAL;
1219}
1220
1221TF_TPG_BASE_ATTR(tcm_loop, transport_status, S_IRUGO | S_IWUSR);
1222
1064static struct configfs_attribute *tcm_loop_tpg_attrs[] = { 1223static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
1065 &tcm_loop_tpg_nexus.attr, 1224 &tcm_loop_tpg_nexus.attr,
1225 &tcm_loop_tpg_transport_status.attr,
1066 NULL, 1226 NULL,
1067}; 1227};
1068 1228
@@ -1334,11 +1494,11 @@ static int tcm_loop_register_configfs(void)
1334 /* 1494 /*
1335 * Setup default attribute lists for various fabric->tf_cit_tmpl 1495 * Setup default attribute lists for various fabric->tf_cit_tmpl
1336 */ 1496 */
1337 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs; 1497 fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
1338 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs; 1498 fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
1339 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; 1499 fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
1340 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; 1500 fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
1341 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; 1501 fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
1342 /* 1502 /*
1343 * Once fabric->tf_ops has been setup, now register the fabric for 1503 * Once fabric->tf_ops has been setup, now register the fabric for
1344 * use within TCM 1504 * use within TCM
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index dd7a84ee78e1..54c59d0b6608 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -10,6 +10,8 @@
10struct tcm_loop_cmd { 10struct tcm_loop_cmd {
11 /* State of Linux/SCSI CDB+Data descriptor */ 11 /* State of Linux/SCSI CDB+Data descriptor */
12 u32 sc_cmd_state; 12 u32 sc_cmd_state;
13 /* Tagged command queueing */
14 u32 sc_cmd_tag;
13 /* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */ 15 /* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */
14 struct scsi_cmnd *sc; 16 struct scsi_cmnd *sc;
15 /* The TCM I/O descriptor that is accessed via container_of() */ 17 /* The TCM I/O descriptor that is accessed via container_of() */
@@ -40,8 +42,12 @@ struct tcm_loop_nacl {
40 struct se_node_acl se_node_acl; 42 struct se_node_acl se_node_acl;
41}; 43};
42 44
45#define TCM_TRANSPORT_ONLINE 0
46#define TCM_TRANSPORT_OFFLINE 1
47
43struct tcm_loop_tpg { 48struct tcm_loop_tpg {
44 unsigned short tl_tpgt; 49 unsigned short tl_tpgt;
50 unsigned short tl_transport_status;
45 atomic_t tl_tpg_port_count; 51 atomic_t tl_tpg_port_count;
46 struct se_portal_group tl_se_tpg; 52 struct se_portal_group tl_se_tpg;
47 struct tcm_loop_hba *tl_hba; 53 struct tcm_loop_hba *tl_hba;
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index e51b09a04d52..24884cac19ce 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -2556,15 +2556,15 @@ static int sbp_register_configfs(void)
2556 /* 2556 /*
2557 * Setup default attribute lists for various fabric->tf_cit_tmpl 2557 * Setup default attribute lists for various fabric->tf_cit_tmpl
2558 */ 2558 */
2559 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = sbp_wwn_attrs; 2559 fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = sbp_wwn_attrs;
2560 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs; 2560 fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs;
2561 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs; 2561 fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs;
2562 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; 2562 fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
2563 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; 2563 fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
2564 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; 2564 fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2565 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 2565 fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2566 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 2566 fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2567 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; 2567 fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2568 2568
2569 ret = target_fabric_configfs_register(fabric); 2569 ret = target_fabric_configfs_register(fabric);
2570 if (ret < 0) { 2570 if (ret < 0) {
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 47244102281e..fdcee326bfbc 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -44,7 +44,7 @@
44static sense_reason_t core_alua_check_transition(int state, int *primary); 44static sense_reason_t core_alua_check_transition(int state, int *primary);
45static int core_alua_set_tg_pt_secondary_state( 45static int core_alua_set_tg_pt_secondary_state(
46 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 46 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
47 struct se_port *port, int explict, int offline); 47 struct se_port *port, int explicit, int offline);
48 48
49static u16 alua_lu_gps_counter; 49static u16 alua_lu_gps_counter;
50static u32 alua_lu_gps_count; 50static u32 alua_lu_gps_count;
@@ -117,12 +117,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
117 /* 117 /*
118 * Set supported ASYMMETRIC ACCESS State bits 118 * Set supported ASYMMETRIC ACCESS State bits
119 */ 119 */
120 buf[off] = 0x80; /* T_SUP */ 120 buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
121 buf[off] |= 0x40; /* O_SUP */
122 buf[off] |= 0x8; /* U_SUP */
123 buf[off] |= 0x4; /* S_SUP */
124 buf[off] |= 0x2; /* AN_SUP */
125 buf[off++] |= 0x1; /* AO_SUP */
126 /* 121 /*
127 * TARGET PORT GROUP 122 * TARGET PORT GROUP
128 */ 123 */
@@ -175,7 +170,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
175 if (ext_hdr != 0) { 170 if (ext_hdr != 0) {
176 buf[4] = 0x10; 171 buf[4] = 0x10;
177 /* 172 /*
178 * Set the implict transition time (in seconds) for the application 173 * Set the implicit transition time (in seconds) for the application
179 * client to use as a base for it's transition timeout value. 174 * client to use as a base for it's transition timeout value.
180 * 175 *
181 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN 176 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
@@ -188,7 +183,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
188 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 183 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
189 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 184 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
190 if (tg_pt_gp) 185 if (tg_pt_gp)
191 buf[5] = tg_pt_gp->tg_pt_gp_implict_trans_secs; 186 buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
192 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 187 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
193 } 188 }
194 } 189 }
@@ -199,7 +194,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
199} 194}
200 195
201/* 196/*
202 * SET_TARGET_PORT_GROUPS for explict ALUA operation. 197 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
203 * 198 *
204 * See spc4r17 section 6.35 199 * See spc4r17 section 6.35
205 */ 200 */
@@ -232,7 +227,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
232 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 227 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
233 228
234 /* 229 /*
235 * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed 230 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
236 * for the local tg_pt_gp. 231 * for the local tg_pt_gp.
237 */ 232 */
238 l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; 233 l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
@@ -251,9 +246,9 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
251 } 246 }
252 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 247 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
253 248
254 if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)) { 249 if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
255 pr_debug("Unable to process SET_TARGET_PORT_GROUPS" 250 pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
256 " while TPGS_EXPLICT_ALUA is disabled\n"); 251 " while TPGS_EXPLICIT_ALUA is disabled\n");
257 rc = TCM_UNSUPPORTED_SCSI_OPCODE; 252 rc = TCM_UNSUPPORTED_SCSI_OPCODE;
258 goto out; 253 goto out;
259 } 254 }
@@ -330,7 +325,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
330 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 325 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
331 } else { 326 } else {
332 /* 327 /*
333 * Extact the RELATIVE TARGET PORT IDENTIFIER to identify 328 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
334 * the Target Port in question for the the incoming 329 * the Target Port in question for the the incoming
335 * SET_TARGET_PORT_GROUPS op. 330 * SET_TARGET_PORT_GROUPS op.
336 */ 331 */
@@ -487,7 +482,7 @@ static inline int core_alua_state_transition(
487 u8 *alua_ascq) 482 u8 *alua_ascq)
488{ 483{
489 /* 484 /*
490 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITIO as defined by 485 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
491 * spc4r17 section 5.9.2.5 486 * spc4r17 section 5.9.2.5
492 */ 487 */
493 switch (cdb[0]) { 488 switch (cdb[0]) {
@@ -515,9 +510,9 @@ static inline int core_alua_state_transition(
515} 510}
516 511
517/* 512/*
518 * return 1: Is used to signal LUN not accecsable, and check condition/not ready 513 * return 1: Is used to signal LUN not accessible, and check condition/not ready
519 * return 0: Used to signal success 514 * return 0: Used to signal success
520 * reutrn -1: Used to signal failure, and invalid cdb field 515 * return -1: Used to signal failure, and invalid cdb field
521 */ 516 */
522sense_reason_t 517sense_reason_t
523target_alua_state_check(struct se_cmd *cmd) 518target_alua_state_check(struct se_cmd *cmd)
@@ -566,12 +561,12 @@ target_alua_state_check(struct se_cmd *cmd)
566 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; 561 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
567 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 562 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
568 /* 563 /*
569 * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional 564 * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
570 * statement so the compiler knows explicitly to check this case first. 565 * statement so the compiler knows explicitly to check this case first.
571 * For the Optimized ALUA access state case, we want to process the 566 * For the Optimized ALUA access state case, we want to process the
572 * incoming fabric cmd ASAP.. 567 * incoming fabric cmd ASAP..
573 */ 568 */
574 if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED) 569 if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
575 return 0; 570 return 0;
576 571
577 switch (out_alua_state) { 572 switch (out_alua_state) {
@@ -620,13 +615,13 @@ out:
620} 615}
621 616
622/* 617/*
623 * Check implict and explict ALUA state change request. 618 * Check implicit and explicit ALUA state change request.
624 */ 619 */
625static sense_reason_t 620static sense_reason_t
626core_alua_check_transition(int state, int *primary) 621core_alua_check_transition(int state, int *primary)
627{ 622{
628 switch (state) { 623 switch (state) {
629 case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: 624 case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
630 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 625 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
631 case ALUA_ACCESS_STATE_STANDBY: 626 case ALUA_ACCESS_STATE_STANDBY:
632 case ALUA_ACCESS_STATE_UNAVAILABLE: 627 case ALUA_ACCESS_STATE_UNAVAILABLE:
@@ -654,7 +649,7 @@ core_alua_check_transition(int state, int *primary)
654static char *core_alua_dump_state(int state) 649static char *core_alua_dump_state(int state)
655{ 650{
656 switch (state) { 651 switch (state) {
657 case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: 652 case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
658 return "Active/Optimized"; 653 return "Active/Optimized";
659 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 654 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
660 return "Active/NonOptimized"; 655 return "Active/NonOptimized";
@@ -676,10 +671,10 @@ char *core_alua_dump_status(int status)
676 switch (status) { 671 switch (status) {
677 case ALUA_STATUS_NONE: 672 case ALUA_STATUS_NONE:
678 return "None"; 673 return "None";
679 case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG: 674 case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
680 return "Altered by Explict STPG"; 675 return "Altered by Explicit STPG";
681 case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA: 676 case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
682 return "Altered by Implict ALUA"; 677 return "Altered by Implicit ALUA";
683 default: 678 default:
684 return "Unknown"; 679 return "Unknown";
685 } 680 }
@@ -770,7 +765,7 @@ static int core_alua_do_transition_tg_pt(
770 struct se_node_acl *nacl, 765 struct se_node_acl *nacl,
771 unsigned char *md_buf, 766 unsigned char *md_buf,
772 int new_state, 767 int new_state,
773 int explict) 768 int explicit)
774{ 769{
775 struct se_dev_entry *se_deve; 770 struct se_dev_entry *se_deve;
776 struct se_lun_acl *lacl; 771 struct se_lun_acl *lacl;
@@ -784,9 +779,9 @@ static int core_alua_do_transition_tg_pt(
784 old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); 779 old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
785 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 780 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
786 ALUA_ACCESS_STATE_TRANSITION); 781 ALUA_ACCESS_STATE_TRANSITION);
787 tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ? 782 tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
788 ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : 783 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
789 ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; 784 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
790 /* 785 /*
791 * Check for the optional ALUA primary state transition delay 786 * Check for the optional ALUA primary state transition delay
792 */ 787 */
@@ -802,7 +797,7 @@ static int core_alua_do_transition_tg_pt(
802 * change, a device server shall establish a unit attention 797 * change, a device server shall establish a unit attention
803 * condition for the initiator port associated with every I_T 798 * condition for the initiator port associated with every I_T
804 * nexus with the additional sense code set to ASYMMETRIC 799 * nexus with the additional sense code set to ASYMMETRIC
805 * ACCESS STATE CHAGED. 800 * ACCESS STATE CHANGED.
806 * 801 *
807 * After an explicit target port asymmetric access state 802 * After an explicit target port asymmetric access state
808 * change, a device server shall establish a unit attention 803 * change, a device server shall establish a unit attention
@@ -821,12 +816,12 @@ static int core_alua_do_transition_tg_pt(
821 lacl = se_deve->se_lun_acl; 816 lacl = se_deve->se_lun_acl;
822 /* 817 /*
823 * se_deve->se_lun_acl pointer may be NULL for a 818 * se_deve->se_lun_acl pointer may be NULL for a
824 * entry created without explict Node+MappedLUN ACLs 819 * entry created without explicit Node+MappedLUN ACLs
825 */ 820 */
826 if (!lacl) 821 if (!lacl)
827 continue; 822 continue;
828 823
829 if (explict && 824 if (explicit &&
830 (nacl != NULL) && (nacl == lacl->se_lun_nacl) && 825 (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
831 (l_port != NULL) && (l_port == port)) 826 (l_port != NULL) && (l_port == port))
832 continue; 827 continue;
@@ -866,8 +861,8 @@ static int core_alua_do_transition_tg_pt(
866 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state); 861 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
867 862
868 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" 863 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
869 " from primary access state %s to %s\n", (explict) ? "explict" : 864 " from primary access state %s to %s\n", (explicit) ? "explicit" :
870 "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), 865 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
871 tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state), 866 tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
872 core_alua_dump_state(new_state)); 867 core_alua_dump_state(new_state));
873 868
@@ -880,7 +875,7 @@ int core_alua_do_port_transition(
880 struct se_port *l_port, 875 struct se_port *l_port,
881 struct se_node_acl *l_nacl, 876 struct se_node_acl *l_nacl,
882 int new_state, 877 int new_state,
883 int explict) 878 int explicit)
884{ 879{
885 struct se_device *dev; 880 struct se_device *dev;
886 struct se_port *port; 881 struct se_port *port;
@@ -917,7 +912,7 @@ int core_alua_do_port_transition(
917 * success. 912 * success.
918 */ 913 */
919 core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl, 914 core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
920 md_buf, new_state, explict); 915 md_buf, new_state, explicit);
921 atomic_dec(&lu_gp->lu_gp_ref_cnt); 916 atomic_dec(&lu_gp->lu_gp_ref_cnt);
922 smp_mb__after_atomic_dec(); 917 smp_mb__after_atomic_dec();
923 kfree(md_buf); 918 kfree(md_buf);
@@ -946,7 +941,7 @@ int core_alua_do_port_transition(
946 continue; 941 continue;
947 /* 942 /*
948 * If the target behavior port asymmetric access state 943 * If the target behavior port asymmetric access state
949 * is changed for any target port group accessiable via 944 * is changed for any target port group accessible via
950 * a logical unit within a LU group, the target port 945 * a logical unit within a LU group, the target port
951 * behavior group asymmetric access states for the same 946 * behavior group asymmetric access states for the same
952 * target port group accessible via other logical units 947 * target port group accessible via other logical units
@@ -970,7 +965,7 @@ int core_alua_do_port_transition(
970 * success. 965 * success.
971 */ 966 */
972 core_alua_do_transition_tg_pt(tg_pt_gp, port, 967 core_alua_do_transition_tg_pt(tg_pt_gp, port,
973 nacl, md_buf, new_state, explict); 968 nacl, md_buf, new_state, explicit);
974 969
975 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 970 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
976 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 971 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
@@ -987,7 +982,7 @@ int core_alua_do_port_transition(
987 pr_debug("Successfully processed LU Group: %s all ALUA TG PT" 982 pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
988 " Group IDs: %hu %s transition to primary state: %s\n", 983 " Group IDs: %hu %s transition to primary state: %s\n",
989 config_item_name(&lu_gp->lu_gp_group.cg_item), 984 config_item_name(&lu_gp->lu_gp_group.cg_item),
990 l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict", 985 l_tg_pt_gp->tg_pt_gp_id, (explicit) ? "explicit" : "implicit",
991 core_alua_dump_state(new_state)); 986 core_alua_dump_state(new_state));
992 987
993 atomic_dec(&lu_gp->lu_gp_ref_cnt); 988 atomic_dec(&lu_gp->lu_gp_ref_cnt);
@@ -1034,7 +1029,7 @@ static int core_alua_update_tpg_secondary_metadata(
1034static int core_alua_set_tg_pt_secondary_state( 1029static int core_alua_set_tg_pt_secondary_state(
1035 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 1030 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1036 struct se_port *port, 1031 struct se_port *port,
1037 int explict, 1032 int explicit,
1038 int offline) 1033 int offline)
1039{ 1034{
1040 struct t10_alua_tg_pt_gp *tg_pt_gp; 1035 struct t10_alua_tg_pt_gp *tg_pt_gp;
@@ -1061,13 +1056,13 @@ static int core_alua_set_tg_pt_secondary_state(
1061 atomic_set(&port->sep_tg_pt_secondary_offline, 0); 1056 atomic_set(&port->sep_tg_pt_secondary_offline, 0);
1062 1057
1063 md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len; 1058 md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
1064 port->sep_tg_pt_secondary_stat = (explict) ? 1059 port->sep_tg_pt_secondary_stat = (explicit) ?
1065 ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : 1060 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1066 ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; 1061 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1067 1062
1068 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" 1063 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1069 " to secondary access state: %s\n", (explict) ? "explict" : 1064 " to secondary access state: %s\n", (explicit) ? "explicit" :
1070 "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), 1065 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1071 tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); 1066 tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
1072 1067
1073 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1068 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1232,7 +1227,7 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1232 * struct se_device is released via core_alua_free_lu_gp_mem(). 1227 * struct se_device is released via core_alua_free_lu_gp_mem().
1233 * 1228 *
1234 * If the passed lu_gp does NOT match the default_lu_gp, assume 1229 * If the passed lu_gp does NOT match the default_lu_gp, assume
1235 * we want to re-assocate a given lu_gp_mem with default_lu_gp. 1230 * we want to re-associate a given lu_gp_mem with default_lu_gp.
1236 */ 1231 */
1237 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 1232 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1238 if (lu_gp != default_lu_gp) 1233 if (lu_gp != default_lu_gp)
@@ -1354,18 +1349,25 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
1354 tg_pt_gp->tg_pt_gp_dev = dev; 1349 tg_pt_gp->tg_pt_gp_dev = dev;
1355 tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN; 1350 tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
1356 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 1351 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1357 ALUA_ACCESS_STATE_ACTIVE_OPTMIZED); 1352 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
1358 /* 1353 /*
1359 * Enable both explict and implict ALUA support by default 1354 * Enable both explicit and implicit ALUA support by default
1360 */ 1355 */
1361 tg_pt_gp->tg_pt_gp_alua_access_type = 1356 tg_pt_gp->tg_pt_gp_alua_access_type =
1362 TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA; 1357 TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
1363 /* 1358 /*
1364 * Set the default Active/NonOptimized Delay in milliseconds 1359 * Set the default Active/NonOptimized Delay in milliseconds
1365 */ 1360 */
1366 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS; 1361 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1367 tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS; 1362 tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1368 tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS; 1363 tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;
1364
1365 /*
1366 * Enable all supported states
1367 */
1368 tg_pt_gp->tg_pt_gp_alua_supported_states =
1369 ALUA_T_SUP | ALUA_O_SUP |
1370 ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
1369 1371
1370 if (def_group) { 1372 if (def_group) {
1371 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1373 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
@@ -1465,7 +1467,7 @@ void core_alua_free_tg_pt_gp(
1465 * been called from target_core_alua_drop_tg_pt_gp(). 1467 * been called from target_core_alua_drop_tg_pt_gp().
1466 * 1468 *
1467 * Here we remove *tg_pt_gp from the global list so that 1469 * Here we remove *tg_pt_gp from the global list so that
1468 * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS 1470 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1469 * can be made while we are releasing struct t10_alua_tg_pt_gp. 1471 * can be made while we are releasing struct t10_alua_tg_pt_gp.
1470 */ 1472 */
1471 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1473 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
@@ -1501,7 +1503,7 @@ void core_alua_free_tg_pt_gp(
1501 * core_alua_free_tg_pt_gp_mem(). 1503 * core_alua_free_tg_pt_gp_mem().
1502 * 1504 *
1503 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp, 1505 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1504 * assume we want to re-assocate a given tg_pt_gp_mem with 1506 * assume we want to re-associate a given tg_pt_gp_mem with
1505 * default_tg_pt_gp. 1507 * default_tg_pt_gp.
1506 */ 1508 */
1507 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1509 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1740,13 +1742,13 @@ ssize_t core_alua_show_access_type(
1740 struct t10_alua_tg_pt_gp *tg_pt_gp, 1742 struct t10_alua_tg_pt_gp *tg_pt_gp,
1741 char *page) 1743 char *page)
1742{ 1744{
1743 if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) && 1745 if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
1744 (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) 1746 (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
1745 return sprintf(page, "Implict and Explict\n"); 1747 return sprintf(page, "Implicit and Explicit\n");
1746 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA) 1748 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
1747 return sprintf(page, "Implict\n"); 1749 return sprintf(page, "Implicit\n");
1748 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) 1750 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
1749 return sprintf(page, "Explict\n"); 1751 return sprintf(page, "Explicit\n");
1750 else 1752 else
1751 return sprintf(page, "None\n"); 1753 return sprintf(page, "None\n");
1752} 1754}
@@ -1771,11 +1773,11 @@ ssize_t core_alua_store_access_type(
1771 } 1773 }
1772 if (tmp == 3) 1774 if (tmp == 3)
1773 tg_pt_gp->tg_pt_gp_alua_access_type = 1775 tg_pt_gp->tg_pt_gp_alua_access_type =
1774 TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA; 1776 TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
1775 else if (tmp == 2) 1777 else if (tmp == 2)
1776 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA; 1778 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
1777 else if (tmp == 1) 1779 else if (tmp == 1)
1778 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA; 1780 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
1779 else 1781 else
1780 tg_pt_gp->tg_pt_gp_alua_access_type = 0; 1782 tg_pt_gp->tg_pt_gp_alua_access_type = 0;
1781 1783
@@ -1844,14 +1846,14 @@ ssize_t core_alua_store_trans_delay_msecs(
1844 return count; 1846 return count;
1845} 1847}
1846 1848
1847ssize_t core_alua_show_implict_trans_secs( 1849ssize_t core_alua_show_implicit_trans_secs(
1848 struct t10_alua_tg_pt_gp *tg_pt_gp, 1850 struct t10_alua_tg_pt_gp *tg_pt_gp,
1849 char *page) 1851 char *page)
1850{ 1852{
1851 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implict_trans_secs); 1853 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
1852} 1854}
1853 1855
1854ssize_t core_alua_store_implict_trans_secs( 1856ssize_t core_alua_store_implicit_trans_secs(
1855 struct t10_alua_tg_pt_gp *tg_pt_gp, 1857 struct t10_alua_tg_pt_gp *tg_pt_gp,
1856 const char *page, 1858 const char *page,
1857 size_t count) 1859 size_t count)
@@ -1861,16 +1863,16 @@ ssize_t core_alua_store_implict_trans_secs(
1861 1863
1862 ret = kstrtoul(page, 0, &tmp); 1864 ret = kstrtoul(page, 0, &tmp);
1863 if (ret < 0) { 1865 if (ret < 0) {
1864 pr_err("Unable to extract implict_trans_secs\n"); 1866 pr_err("Unable to extract implicit_trans_secs\n");
1865 return ret; 1867 return ret;
1866 } 1868 }
1867 if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) { 1869 if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
1868 pr_err("Passed implict_trans_secs: %lu, exceeds" 1870 pr_err("Passed implicit_trans_secs: %lu, exceeds"
1869 " ALUA_MAX_IMPLICT_TRANS_SECS: %d\n", tmp, 1871 " ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
1870 ALUA_MAX_IMPLICT_TRANS_SECS); 1872 ALUA_MAX_IMPLICIT_TRANS_SECS);
1871 return -EINVAL; 1873 return -EINVAL;
1872 } 1874 }
1873 tg_pt_gp->tg_pt_gp_implict_trans_secs = (int)tmp; 1875 tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
1874 1876
1875 return count; 1877 return count;
1876} 1878}
@@ -1970,8 +1972,8 @@ ssize_t core_alua_store_secondary_status(
1970 return ret; 1972 return ret;
1971 } 1973 }
1972 if ((tmp != ALUA_STATUS_NONE) && 1974 if ((tmp != ALUA_STATUS_NONE) &&
1973 (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && 1975 (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
1974 (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { 1976 (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
1975 pr_err("Illegal value for alua_tg_pt_status: %lu\n", 1977 pr_err("Illegal value for alua_tg_pt_status: %lu\n",
1976 tmp); 1978 tmp);
1977 return -EINVAL; 1979 return -EINVAL;
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index e539c3e7f4ad..88e2e835f14a 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -7,15 +7,15 @@
7 * from spc4r17 section 6.4.2 Table 135 7 * from spc4r17 section 6.4.2 Table 135
8 */ 8 */
9#define TPGS_NO_ALUA 0x00 9#define TPGS_NO_ALUA 0x00
10#define TPGS_IMPLICT_ALUA 0x10 10#define TPGS_IMPLICIT_ALUA 0x10
11#define TPGS_EXPLICT_ALUA 0x20 11#define TPGS_EXPLICIT_ALUA 0x20
12 12
13/* 13/*
14 * ASYMMETRIC ACCESS STATE field 14 * ASYMMETRIC ACCESS STATE field
15 * 15 *
16 * from spc4r17 section 6.27 Table 245 16 * from spc4r17 section 6.27 Table 245
17 */ 17 */
18#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED 0x0 18#define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED 0x0
19#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1 19#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1
20#define ALUA_ACCESS_STATE_STANDBY 0x2 20#define ALUA_ACCESS_STATE_STANDBY 0x2
21#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3 21#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3
@@ -23,13 +23,24 @@
23#define ALUA_ACCESS_STATE_TRANSITION 0xf 23#define ALUA_ACCESS_STATE_TRANSITION 0xf
24 24
25/* 25/*
26 * from spc4r36j section 6.37 Table 306
27 */
28#define ALUA_T_SUP 0x80
29#define ALUA_O_SUP 0x40
30#define ALUA_LBD_SUP 0x10
31#define ALUA_U_SUP 0x08
32#define ALUA_S_SUP 0x04
33#define ALUA_AN_SUP 0x02
34#define ALUA_AO_SUP 0x01
35
36/*
26 * REPORT_TARGET_PORT_GROUP STATUS CODE 37 * REPORT_TARGET_PORT_GROUP STATUS CODE
27 * 38 *
28 * from spc4r17 section 6.27 Table 246 39 * from spc4r17 section 6.27 Table 246
29 */ 40 */
30#define ALUA_STATUS_NONE 0x00 41#define ALUA_STATUS_NONE 0x00
31#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG 0x01 42#define ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG 0x01
32#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA 0x02 43#define ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA 0x02
33 44
34/* 45/*
35 * From spc4r17, Table D.1: ASC and ASCQ Assignement 46 * From spc4r17, Table D.1: ASC and ASCQ Assignement
@@ -46,17 +57,17 @@
46#define ALUA_DEFAULT_NONOP_DELAY_MSECS 100 57#define ALUA_DEFAULT_NONOP_DELAY_MSECS 100
47#define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */ 58#define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */
48/* 59/*
49 * Used for implict and explict ALUA transitional delay, that is disabled 60 * Used for implicit and explicit ALUA transitional delay, that is disabled
50 * by default, and is intended to be used for debugging client side ALUA code. 61 * by default, and is intended to be used for debugging client side ALUA code.
51 */ 62 */
52#define ALUA_DEFAULT_TRANS_DELAY_MSECS 0 63#define ALUA_DEFAULT_TRANS_DELAY_MSECS 0
53#define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */ 64#define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */
54/* 65/*
55 * Used for the recommended application client implict transition timeout 66 * Used for the recommended application client implicit transition timeout
56 * in seconds, returned by the REPORT_TARGET_PORT_GROUPS w/ extended header. 67 * in seconds, returned by the REPORT_TARGET_PORT_GROUPS w/ extended header.
57 */ 68 */
58#define ALUA_DEFAULT_IMPLICT_TRANS_SECS 0 69#define ALUA_DEFAULT_IMPLICIT_TRANS_SECS 0
59#define ALUA_MAX_IMPLICT_TRANS_SECS 255 70#define ALUA_MAX_IMPLICIT_TRANS_SECS 255
60/* 71/*
61 * Used by core_alua_update_tpg_primary_metadata() and 72 * Used by core_alua_update_tpg_primary_metadata() and
62 * core_alua_update_tpg_secondary_metadata() 73 * core_alua_update_tpg_secondary_metadata()
@@ -113,9 +124,9 @@ extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
113 char *); 124 char *);
114extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *, 125extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
115 const char *, size_t); 126 const char *, size_t);
116extern ssize_t core_alua_show_implict_trans_secs(struct t10_alua_tg_pt_gp *, 127extern ssize_t core_alua_show_implicit_trans_secs(struct t10_alua_tg_pt_gp *,
117 char *); 128 char *);
118extern ssize_t core_alua_store_implict_trans_secs(struct t10_alua_tg_pt_gp *, 129extern ssize_t core_alua_store_implicit_trans_secs(struct t10_alua_tg_pt_gp *,
119 const char *, size_t); 130 const char *, size_t);
120extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *, 131extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
121 char *); 132 char *);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 82e81c542e43..272755d03e5a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -177,16 +177,16 @@ static struct config_group *target_core_register_fabric(
177 * struct target_fabric_configfs *tf will contain a usage reference. 177 * struct target_fabric_configfs *tf will contain a usage reference.
178 */ 178 */
179 pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", 179 pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
180 &TF_CIT_TMPL(tf)->tfc_wwn_cit); 180 &tf->tf_cit_tmpl.tfc_wwn_cit);
181 181
182 tf->tf_group.default_groups = tf->tf_default_groups; 182 tf->tf_group.default_groups = tf->tf_default_groups;
183 tf->tf_group.default_groups[0] = &tf->tf_disc_group; 183 tf->tf_group.default_groups[0] = &tf->tf_disc_group;
184 tf->tf_group.default_groups[1] = NULL; 184 tf->tf_group.default_groups[1] = NULL;
185 185
186 config_group_init_type_name(&tf->tf_group, name, 186 config_group_init_type_name(&tf->tf_group, name,
187 &TF_CIT_TMPL(tf)->tfc_wwn_cit); 187 &tf->tf_cit_tmpl.tfc_wwn_cit);
188 config_group_init_type_name(&tf->tf_disc_group, "discovery_auth", 188 config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
189 &TF_CIT_TMPL(tf)->tfc_discovery_cit); 189 &tf->tf_cit_tmpl.tfc_discovery_cit);
190 190
191 pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" 191 pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
192 " %s\n", tf->tf_group.cg_item.ci_name); 192 " %s\n", tf->tf_group.cg_item.ci_name);
@@ -2036,7 +2036,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
2036 int new_state, ret; 2036 int new_state, ret;
2037 2037
2038 if (!tg_pt_gp->tg_pt_gp_valid_id) { 2038 if (!tg_pt_gp->tg_pt_gp_valid_id) {
2039 pr_err("Unable to do implict ALUA on non valid" 2039 pr_err("Unable to do implicit ALUA on non valid"
2040 " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); 2040 " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
2041 return -EINVAL; 2041 return -EINVAL;
2042 } 2042 }
@@ -2049,9 +2049,9 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
2049 } 2049 }
2050 new_state = (int)tmp; 2050 new_state = (int)tmp;
2051 2051
2052 if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) { 2052 if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
2053 pr_err("Unable to process implict configfs ALUA" 2053 pr_err("Unable to process implicit configfs ALUA"
2054 " transition while TPGS_IMPLICT_ALUA is disabled\n"); 2054 " transition while TPGS_IMPLICIT_ALUA is disabled\n");
2055 return -EINVAL; 2055 return -EINVAL;
2056 } 2056 }
2057 2057
@@ -2097,8 +2097,8 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
2097 new_status = (int)tmp; 2097 new_status = (int)tmp;
2098 2098
2099 if ((new_status != ALUA_STATUS_NONE) && 2099 if ((new_status != ALUA_STATUS_NONE) &&
2100 (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && 2100 (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
2101 (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { 2101 (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
2102 pr_err("Illegal ALUA access status: 0x%02x\n", 2102 pr_err("Illegal ALUA access status: 0x%02x\n",
2103 new_status); 2103 new_status);
2104 return -EINVAL; 2104 return -EINVAL;
@@ -2131,6 +2131,90 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
2131SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR); 2131SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);
2132 2132
2133/* 2133/*
2134 * alua_supported_states
2135 */
2136
2137#define SE_DEV_ALUA_SUPPORT_STATE_SHOW(_name, _var, _bit) \
2138static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_##_name( \
2139 struct t10_alua_tg_pt_gp *t, char *p) \
2140{ \
2141 return sprintf(p, "%d\n", !!(t->_var & _bit)); \
2142}
2143
2144#define SE_DEV_ALUA_SUPPORT_STATE_STORE(_name, _var, _bit) \
2145static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_##_name(\
2146 struct t10_alua_tg_pt_gp *t, const char *p, size_t c) \
2147{ \
2148 unsigned long tmp; \
2149 int ret; \
2150 \
2151 if (!t->tg_pt_gp_valid_id) { \
2152 pr_err("Unable to do set ##_name ALUA state on non" \
2153 " valid tg_pt_gp ID: %hu\n", \
2154 t->tg_pt_gp_valid_id); \
2155 return -EINVAL; \
2156 } \
2157 \
2158 ret = kstrtoul(p, 0, &tmp); \
2159 if (ret < 0) { \
2160 pr_err("Invalid value '%s', must be '0' or '1'\n", p); \
2161 return -EINVAL; \
2162 } \
2163 if (tmp > 1) { \
2164 pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
2165 return -EINVAL; \
2166 } \
2167 if (!tmp) \
2168 t->_var |= _bit; \
2169 else \
2170 t->_var &= ~_bit; \
2171 \
2172 return c; \
2173}
2174
2175SE_DEV_ALUA_SUPPORT_STATE_SHOW(transitioning,
2176 tg_pt_gp_alua_supported_states, ALUA_T_SUP);
2177SE_DEV_ALUA_SUPPORT_STATE_STORE(transitioning,
2178 tg_pt_gp_alua_supported_states, ALUA_T_SUP);
2179SE_DEV_ALUA_TG_PT_ATTR(alua_support_transitioning, S_IRUGO | S_IWUSR);
2180
2181SE_DEV_ALUA_SUPPORT_STATE_SHOW(offline,
2182 tg_pt_gp_alua_supported_states, ALUA_O_SUP);
2183SE_DEV_ALUA_SUPPORT_STATE_STORE(offline,
2184 tg_pt_gp_alua_supported_states, ALUA_O_SUP);
2185SE_DEV_ALUA_TG_PT_ATTR(alua_support_offline, S_IRUGO | S_IWUSR);
2186
2187SE_DEV_ALUA_SUPPORT_STATE_SHOW(lba_dependent,
2188 tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
2189SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent,
2190 tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
2191SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO | S_IWUSR);
2192
2193SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable,
2194 tg_pt_gp_alua_supported_states, ALUA_U_SUP);
2195SE_DEV_ALUA_SUPPORT_STATE_STORE(unavailable,
2196 tg_pt_gp_alua_supported_states, ALUA_U_SUP);
2197SE_DEV_ALUA_TG_PT_ATTR(alua_support_unavailable, S_IRUGO | S_IWUSR);
2198
2199SE_DEV_ALUA_SUPPORT_STATE_SHOW(standby,
2200 tg_pt_gp_alua_supported_states, ALUA_S_SUP);
2201SE_DEV_ALUA_SUPPORT_STATE_STORE(standby,
2202 tg_pt_gp_alua_supported_states, ALUA_S_SUP);
2203SE_DEV_ALUA_TG_PT_ATTR(alua_support_standby, S_IRUGO | S_IWUSR);
2204
2205SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_optimized,
2206 tg_pt_gp_alua_supported_states, ALUA_AO_SUP);
2207SE_DEV_ALUA_SUPPORT_STATE_STORE(active_optimized,
2208 tg_pt_gp_alua_supported_states, ALUA_AO_SUP);
2209SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_optimized, S_IRUGO | S_IWUSR);
2210
2211SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_nonoptimized,
2212 tg_pt_gp_alua_supported_states, ALUA_AN_SUP);
2213SE_DEV_ALUA_SUPPORT_STATE_STORE(active_nonoptimized,
2214 tg_pt_gp_alua_supported_states, ALUA_AN_SUP);
2215SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_nonoptimized, S_IRUGO | S_IWUSR);
2216
2217/*
2134 * alua_write_metadata 2218 * alua_write_metadata
2135 */ 2219 */
2136static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata( 2220static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(
@@ -2210,24 +2294,24 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
2210SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR); 2294SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
2211 2295
2212/* 2296/*
2213 * implict_trans_secs 2297 * implicit_trans_secs
2214 */ 2298 */
2215static ssize_t target_core_alua_tg_pt_gp_show_attr_implict_trans_secs( 2299static ssize_t target_core_alua_tg_pt_gp_show_attr_implicit_trans_secs(
2216 struct t10_alua_tg_pt_gp *tg_pt_gp, 2300 struct t10_alua_tg_pt_gp *tg_pt_gp,
2217 char *page) 2301 char *page)
2218{ 2302{
2219 return core_alua_show_implict_trans_secs(tg_pt_gp, page); 2303 return core_alua_show_implicit_trans_secs(tg_pt_gp, page);
2220} 2304}
2221 2305
2222static ssize_t target_core_alua_tg_pt_gp_store_attr_implict_trans_secs( 2306static ssize_t target_core_alua_tg_pt_gp_store_attr_implicit_trans_secs(
2223 struct t10_alua_tg_pt_gp *tg_pt_gp, 2307 struct t10_alua_tg_pt_gp *tg_pt_gp,
2224 const char *page, 2308 const char *page,
2225 size_t count) 2309 size_t count)
2226{ 2310{
2227 return core_alua_store_implict_trans_secs(tg_pt_gp, page, count); 2311 return core_alua_store_implicit_trans_secs(tg_pt_gp, page, count);
2228} 2312}
2229 2313
2230SE_DEV_ALUA_TG_PT_ATTR(implict_trans_secs, S_IRUGO | S_IWUSR); 2314SE_DEV_ALUA_TG_PT_ATTR(implicit_trans_secs, S_IRUGO | S_IWUSR);
2231 2315
2232/* 2316/*
2233 * preferred 2317 * preferred
@@ -2350,10 +2434,17 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
2350 &target_core_alua_tg_pt_gp_alua_access_state.attr, 2434 &target_core_alua_tg_pt_gp_alua_access_state.attr,
2351 &target_core_alua_tg_pt_gp_alua_access_status.attr, 2435 &target_core_alua_tg_pt_gp_alua_access_status.attr,
2352 &target_core_alua_tg_pt_gp_alua_access_type.attr, 2436 &target_core_alua_tg_pt_gp_alua_access_type.attr,
2437 &target_core_alua_tg_pt_gp_alua_support_transitioning.attr,
2438 &target_core_alua_tg_pt_gp_alua_support_offline.attr,
2439 &target_core_alua_tg_pt_gp_alua_support_lba_dependent.attr,
2440 &target_core_alua_tg_pt_gp_alua_support_unavailable.attr,
2441 &target_core_alua_tg_pt_gp_alua_support_standby.attr,
2442 &target_core_alua_tg_pt_gp_alua_support_active_nonoptimized.attr,
2443 &target_core_alua_tg_pt_gp_alua_support_active_optimized.attr,
2353 &target_core_alua_tg_pt_gp_alua_write_metadata.attr, 2444 &target_core_alua_tg_pt_gp_alua_write_metadata.attr,
2354 &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr, 2445 &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
2355 &target_core_alua_tg_pt_gp_trans_delay_msecs.attr, 2446 &target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
2356 &target_core_alua_tg_pt_gp_implict_trans_secs.attr, 2447 &target_core_alua_tg_pt_gp_implicit_trans_secs.attr,
2357 &target_core_alua_tg_pt_gp_preferred.attr, 2448 &target_core_alua_tg_pt_gp_preferred.attr,
2358 &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr, 2449 &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
2359 &target_core_alua_tg_pt_gp_members.attr, 2450 &target_core_alua_tg_pt_gp_members.attr,
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index d90dbb0f1a69..207b340498a3 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -92,6 +92,9 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
92 se_cmd->pr_res_key = deve->pr_res_key; 92 se_cmd->pr_res_key = deve->pr_res_key;
93 se_cmd->orig_fe_lun = unpacked_lun; 93 se_cmd->orig_fe_lun = unpacked_lun;
94 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 94 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
95
96 percpu_ref_get(&se_lun->lun_ref);
97 se_cmd->lun_ref_active = true;
95 } 98 }
96 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); 99 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
97 100
@@ -119,24 +122,20 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
119 se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; 122 se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
120 se_cmd->orig_fe_lun = 0; 123 se_cmd->orig_fe_lun = 0;
121 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 124 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
125
126 percpu_ref_get(&se_lun->lun_ref);
127 se_cmd->lun_ref_active = true;
122 } 128 }
123 129
124 /* Directly associate cmd with se_dev */ 130 /* Directly associate cmd with se_dev */
125 se_cmd->se_dev = se_lun->lun_se_dev; 131 se_cmd->se_dev = se_lun->lun_se_dev;
126 132
127 /* TODO: get rid of this and use atomics for stats */
128 dev = se_lun->lun_se_dev; 133 dev = se_lun->lun_se_dev;
129 spin_lock_irqsave(&dev->stats_lock, flags); 134 atomic_long_inc(&dev->num_cmds);
130 dev->num_cmds++;
131 if (se_cmd->data_direction == DMA_TO_DEVICE) 135 if (se_cmd->data_direction == DMA_TO_DEVICE)
132 dev->write_bytes += se_cmd->data_length; 136 atomic_long_add(se_cmd->data_length, &dev->write_bytes);
133 else if (se_cmd->data_direction == DMA_FROM_DEVICE) 137 else if (se_cmd->data_direction == DMA_FROM_DEVICE)
134 dev->read_bytes += se_cmd->data_length; 138 atomic_long_add(se_cmd->data_length, &dev->read_bytes);
135 spin_unlock_irqrestore(&dev->stats_lock, flags);
136
137 spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
138 list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
139 spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
140 139
141 return 0; 140 return 0;
142} 141}
@@ -314,14 +313,14 @@ int core_enable_device_list_for_node(
314 deve = nacl->device_list[mapped_lun]; 313 deve = nacl->device_list[mapped_lun];
315 314
316 /* 315 /*
317 * Check if the call is handling demo mode -> explict LUN ACL 316 * Check if the call is handling demo mode -> explicit LUN ACL
318 * transition. This transition must be for the same struct se_lun 317 * transition. This transition must be for the same struct se_lun
319 * + mapped_lun that was setup in demo mode.. 318 * + mapped_lun that was setup in demo mode..
320 */ 319 */
321 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { 320 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
322 if (deve->se_lun_acl != NULL) { 321 if (deve->se_lun_acl != NULL) {
323 pr_err("struct se_dev_entry->se_lun_acl" 322 pr_err("struct se_dev_entry->se_lun_acl"
324 " already set for demo mode -> explict" 323 " already set for demo mode -> explicit"
325 " LUN ACL transition\n"); 324 " LUN ACL transition\n");
326 spin_unlock_irq(&nacl->device_list_lock); 325 spin_unlock_irq(&nacl->device_list_lock);
327 return -EINVAL; 326 return -EINVAL;
@@ -329,7 +328,7 @@ int core_enable_device_list_for_node(
329 if (deve->se_lun != lun) { 328 if (deve->se_lun != lun) {
330 pr_err("struct se_dev_entry->se_lun does" 329 pr_err("struct se_dev_entry->se_lun does"
331 " match passed struct se_lun for demo mode" 330 " match passed struct se_lun for demo mode"
332 " -> explict LUN ACL transition\n"); 331 " -> explicit LUN ACL transition\n");
333 spin_unlock_irq(&nacl->device_list_lock); 332 spin_unlock_irq(&nacl->device_list_lock);
334 return -EINVAL; 333 return -EINVAL;
335 } 334 }
@@ -1407,6 +1406,7 @@ static void scsi_dump_inquiry(struct se_device *dev)
1407struct se_device *target_alloc_device(struct se_hba *hba, const char *name) 1406struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1408{ 1407{
1409 struct se_device *dev; 1408 struct se_device *dev;
1409 struct se_lun *xcopy_lun;
1410 1410
1411 dev = hba->transport->alloc_device(hba, name); 1411 dev = hba->transport->alloc_device(hba, name);
1412 if (!dev) 1412 if (!dev)
@@ -1423,7 +1423,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1423 INIT_LIST_HEAD(&dev->state_list); 1423 INIT_LIST_HEAD(&dev->state_list);
1424 INIT_LIST_HEAD(&dev->qf_cmd_list); 1424 INIT_LIST_HEAD(&dev->qf_cmd_list);
1425 INIT_LIST_HEAD(&dev->g_dev_node); 1425 INIT_LIST_HEAD(&dev->g_dev_node);
1426 spin_lock_init(&dev->stats_lock);
1427 spin_lock_init(&dev->execute_task_lock); 1426 spin_lock_init(&dev->execute_task_lock);
1428 spin_lock_init(&dev->delayed_cmd_lock); 1427 spin_lock_init(&dev->delayed_cmd_lock);
1429 spin_lock_init(&dev->dev_reservation_lock); 1428 spin_lock_init(&dev->dev_reservation_lock);
@@ -1469,6 +1468,14 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1469 dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS; 1468 dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
1470 dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS; 1469 dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
1471 1470
1471 xcopy_lun = &dev->xcopy_lun;
1472 xcopy_lun->lun_se_dev = dev;
1473 init_completion(&xcopy_lun->lun_shutdown_comp);
1474 INIT_LIST_HEAD(&xcopy_lun->lun_acl_list);
1475 spin_lock_init(&xcopy_lun->lun_acl_lock);
1476 spin_lock_init(&xcopy_lun->lun_sep_lock);
1477 init_completion(&xcopy_lun->lun_ref_comp);
1478
1472 return dev; 1479 return dev;
1473} 1480}
1474 1481
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 3503996d7d10..dae2ad6a669e 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -385,9 +385,9 @@ static struct config_group *target_fabric_make_mappedlun(
385 } 385 }
386 386
387 config_group_init_type_name(&lacl->se_lun_group, name, 387 config_group_init_type_name(&lacl->se_lun_group, name,
388 &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit); 388 &tf->tf_cit_tmpl.tfc_tpg_mappedlun_cit);
389 config_group_init_type_name(&lacl->ml_stat_grps.stat_group, 389 config_group_init_type_name(&lacl->ml_stat_grps.stat_group,
390 "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_stat_cit); 390 "statistics", &tf->tf_cit_tmpl.tfc_tpg_mappedlun_stat_cit);
391 lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group; 391 lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
392 lacl_cg->default_groups[1] = NULL; 392 lacl_cg->default_groups[1] = NULL;
393 393
@@ -504,16 +504,16 @@ static struct config_group *target_fabric_make_nodeacl(
504 nacl_cg->default_groups[4] = NULL; 504 nacl_cg->default_groups[4] = NULL;
505 505
506 config_group_init_type_name(&se_nacl->acl_group, name, 506 config_group_init_type_name(&se_nacl->acl_group, name,
507 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit); 507 &tf->tf_cit_tmpl.tfc_tpg_nacl_base_cit);
508 config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib", 508 config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
509 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit); 509 &tf->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit);
510 config_group_init_type_name(&se_nacl->acl_auth_group, "auth", 510 config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
511 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit); 511 &tf->tf_cit_tmpl.tfc_tpg_nacl_auth_cit);
512 config_group_init_type_name(&se_nacl->acl_param_group, "param", 512 config_group_init_type_name(&se_nacl->acl_param_group, "param",
513 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit); 513 &tf->tf_cit_tmpl.tfc_tpg_nacl_param_cit);
514 config_group_init_type_name(&se_nacl->acl_fabric_stat_group, 514 config_group_init_type_name(&se_nacl->acl_fabric_stat_group,
515 "fabric_statistics", 515 "fabric_statistics",
516 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_stat_cit); 516 &tf->tf_cit_tmpl.tfc_tpg_nacl_stat_cit);
517 517
518 return &se_nacl->acl_group; 518 return &se_nacl->acl_group;
519} 519}
@@ -595,7 +595,7 @@ static struct config_group *target_fabric_make_np(
595 595
596 se_tpg_np->tpg_np_parent = se_tpg; 596 se_tpg_np->tpg_np_parent = se_tpg;
597 config_group_init_type_name(&se_tpg_np->tpg_np_group, name, 597 config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
598 &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit); 598 &tf->tf_cit_tmpl.tfc_tpg_np_base_cit);
599 599
600 return &se_tpg_np->tpg_np_group; 600 return &se_tpg_np->tpg_np_group;
601} 601}
@@ -899,9 +899,9 @@ static struct config_group *target_fabric_make_lun(
899 } 899 }
900 900
901 config_group_init_type_name(&lun->lun_group, name, 901 config_group_init_type_name(&lun->lun_group, name,
902 &TF_CIT_TMPL(tf)->tfc_tpg_port_cit); 902 &tf->tf_cit_tmpl.tfc_tpg_port_cit);
903 config_group_init_type_name(&lun->port_stat_grps.stat_group, 903 config_group_init_type_name(&lun->port_stat_grps.stat_group,
904 "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit); 904 "statistics", &tf->tf_cit_tmpl.tfc_tpg_port_stat_cit);
905 lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group; 905 lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
906 lun_cg->default_groups[1] = NULL; 906 lun_cg->default_groups[1] = NULL;
907 907
@@ -1056,19 +1056,19 @@ static struct config_group *target_fabric_make_tpg(
1056 se_tpg->tpg_group.default_groups[6] = NULL; 1056 se_tpg->tpg_group.default_groups[6] = NULL;
1057 1057
1058 config_group_init_type_name(&se_tpg->tpg_group, name, 1058 config_group_init_type_name(&se_tpg->tpg_group, name,
1059 &TF_CIT_TMPL(tf)->tfc_tpg_base_cit); 1059 &tf->tf_cit_tmpl.tfc_tpg_base_cit);
1060 config_group_init_type_name(&se_tpg->tpg_lun_group, "lun", 1060 config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
1061 &TF_CIT_TMPL(tf)->tfc_tpg_lun_cit); 1061 &tf->tf_cit_tmpl.tfc_tpg_lun_cit);
1062 config_group_init_type_name(&se_tpg->tpg_np_group, "np", 1062 config_group_init_type_name(&se_tpg->tpg_np_group, "np",
1063 &TF_CIT_TMPL(tf)->tfc_tpg_np_cit); 1063 &tf->tf_cit_tmpl.tfc_tpg_np_cit);
1064 config_group_init_type_name(&se_tpg->tpg_acl_group, "acls", 1064 config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
1065 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit); 1065 &tf->tf_cit_tmpl.tfc_tpg_nacl_cit);
1066 config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib", 1066 config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
1067 &TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit); 1067 &tf->tf_cit_tmpl.tfc_tpg_attrib_cit);
1068 config_group_init_type_name(&se_tpg->tpg_auth_group, "auth", 1068 config_group_init_type_name(&se_tpg->tpg_auth_group, "auth",
1069 &TF_CIT_TMPL(tf)->tfc_tpg_auth_cit); 1069 &tf->tf_cit_tmpl.tfc_tpg_auth_cit);
1070 config_group_init_type_name(&se_tpg->tpg_param_group, "param", 1070 config_group_init_type_name(&se_tpg->tpg_param_group, "param",
1071 &TF_CIT_TMPL(tf)->tfc_tpg_param_cit); 1071 &tf->tf_cit_tmpl.tfc_tpg_param_cit);
1072 1072
1073 return &se_tpg->tpg_group; 1073 return &se_tpg->tpg_group;
1074} 1074}
@@ -1155,9 +1155,9 @@ static struct config_group *target_fabric_make_wwn(
1155 wwn->wwn_group.default_groups[1] = NULL; 1155 wwn->wwn_group.default_groups[1] = NULL;
1156 1156
1157 config_group_init_type_name(&wwn->wwn_group, name, 1157 config_group_init_type_name(&wwn->wwn_group, name,
1158 &TF_CIT_TMPL(tf)->tfc_tpg_cit); 1158 &tf->tf_cit_tmpl.tfc_tpg_cit);
1159 config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics", 1159 config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics",
1160 &TF_CIT_TMPL(tf)->tfc_wwn_fabric_stats_cit); 1160 &tf->tf_cit_tmpl.tfc_wwn_fabric_stats_cit);
1161 1161
1162 return &wwn->wwn_group; 1162 return &wwn->wwn_group;
1163} 1163}
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index b662f89dedac..0e34cda3271e 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -562,7 +562,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
562 } else { 562 } else {
563 ret = fd_do_rw(cmd, sgl, sgl_nents, 1); 563 ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
564 /* 564 /*
565 * Perform implict vfs_fsync_range() for fd_do_writev() ops 565 * Perform implicit vfs_fsync_range() for fd_do_writev() ops
566 * for SCSI WRITEs with Forced Unit Access (FUA) set. 566 * for SCSI WRITEs with Forced Unit Access (FUA) set.
567 * Allow this to happen independent of WCE=0 setting. 567 * Allow this to happen independent of WCE=0 setting.
568 */ 568 */
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index b9a3394fe479..c87959f12760 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -710,6 +710,45 @@ static sector_t iblock_get_blocks(struct se_device *dev)
710 return iblock_emulate_read_cap_with_block_size(dev, bd, q); 710 return iblock_emulate_read_cap_with_block_size(dev, bd, q);
711} 711}
712 712
713static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
714{
715 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
716 struct block_device *bd = ib_dev->ibd_bd;
717 int ret;
718
719 ret = bdev_alignment_offset(bd);
720 if (ret == -1)
721 return 0;
722
723 /* convert offset-bytes to offset-lbas */
724 return ret / bdev_logical_block_size(bd);
725}
726
727static unsigned int iblock_get_lbppbe(struct se_device *dev)
728{
729 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
730 struct block_device *bd = ib_dev->ibd_bd;
731 int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
732
733 return ilog2(logs_per_phys);
734}
735
736static unsigned int iblock_get_io_min(struct se_device *dev)
737{
738 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
739 struct block_device *bd = ib_dev->ibd_bd;
740
741 return bdev_io_min(bd);
742}
743
744static unsigned int iblock_get_io_opt(struct se_device *dev)
745{
746 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
747 struct block_device *bd = ib_dev->ibd_bd;
748
749 return bdev_io_opt(bd);
750}
751
713static struct sbc_ops iblock_sbc_ops = { 752static struct sbc_ops iblock_sbc_ops = {
714 .execute_rw = iblock_execute_rw, 753 .execute_rw = iblock_execute_rw,
715 .execute_sync_cache = iblock_execute_sync_cache, 754 .execute_sync_cache = iblock_execute_sync_cache,
@@ -749,6 +788,10 @@ static struct se_subsystem_api iblock_template = {
749 .show_configfs_dev_params = iblock_show_configfs_dev_params, 788 .show_configfs_dev_params = iblock_show_configfs_dev_params,
750 .get_device_type = sbc_get_device_type, 789 .get_device_type = sbc_get_device_type,
751 .get_blocks = iblock_get_blocks, 790 .get_blocks = iblock_get_blocks,
791 .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
792 .get_lbppbe = iblock_get_lbppbe,
793 .get_io_min = iblock_get_io_min,
794 .get_io_opt = iblock_get_io_opt,
752 .get_write_cache = iblock_get_write_cache, 795 .get_write_cache = iblock_get_write_cache,
753}; 796};
754 797
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 579128abe3f5..47b63b094cdc 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -75,8 +75,6 @@ extern struct se_device *g_lun0_dev;
75 75
76struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, 76struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
77 const char *); 77 const char *);
78struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
79 unsigned char *);
80void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *); 78void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
81void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); 79void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
82struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32); 80struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
@@ -102,7 +100,7 @@ int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
102int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int); 100int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
103int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); 101int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
104bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags); 102bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
105int transport_clear_lun_from_sessions(struct se_lun *); 103int transport_clear_lun_ref(struct se_lun *);
106void transport_send_task_abort(struct se_cmd *); 104void transport_send_task_abort(struct se_cmd *);
107sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); 105sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
108void target_qf_do_work(struct work_struct *work); 106void target_qf_do_work(struct work_struct *work);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index d1ae4c5c3ffd..2f5d77932c80 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -474,7 +474,7 @@ static int core_scsi3_pr_seq_non_holder(
474 * statement. 474 * statement.
475 */ 475 */
476 if (!ret && !other_cdb) { 476 if (!ret && !other_cdb) {
477 pr_debug("Allowing explict CDB: 0x%02x for %s" 477 pr_debug("Allowing explicit CDB: 0x%02x for %s"
478 " reservation holder\n", cdb[0], 478 " reservation holder\n", cdb[0],
479 core_scsi3_pr_dump_type(pr_reg_type)); 479 core_scsi3_pr_dump_type(pr_reg_type));
480 480
@@ -507,7 +507,7 @@ static int core_scsi3_pr_seq_non_holder(
507 */ 507 */
508 508
509 if (!registered_nexus) { 509 if (!registered_nexus) {
510 pr_debug("Allowing implict CDB: 0x%02x" 510 pr_debug("Allowing implicit CDB: 0x%02x"
511 " for %s reservation on unregistered" 511 " for %s reservation on unregistered"
512 " nexus\n", cdb[0], 512 " nexus\n", cdb[0],
513 core_scsi3_pr_dump_type(pr_reg_type)); 513 core_scsi3_pr_dump_type(pr_reg_type));
@@ -522,7 +522,7 @@ static int core_scsi3_pr_seq_non_holder(
522 * allow commands from registered nexuses. 522 * allow commands from registered nexuses.
523 */ 523 */
524 524
525 pr_debug("Allowing implict CDB: 0x%02x for %s" 525 pr_debug("Allowing implicit CDB: 0x%02x for %s"
526 " reservation\n", cdb[0], 526 " reservation\n", cdb[0],
527 core_scsi3_pr_dump_type(pr_reg_type)); 527 core_scsi3_pr_dump_type(pr_reg_type));
528 528
@@ -683,7 +683,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
683 alua_port_list) { 683 alua_port_list) {
684 /* 684 /*
685 * This pointer will be NULL for demo mode MappedLUNs 685 * This pointer will be NULL for demo mode MappedLUNs
686 * that have not been make explict via a ConfigFS 686 * that have not been make explicit via a ConfigFS
687 * MappedLUN group for the SCSI Initiator Node ACL. 687 * MappedLUN group for the SCSI Initiator Node ACL.
688 */ 688 */
689 if (!deve_tmp->se_lun_acl) 689 if (!deve_tmp->se_lun_acl)
@@ -1158,7 +1158,7 @@ static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
1158 smp_mb__after_atomic_dec(); 1158 smp_mb__after_atomic_dec();
1159} 1159}
1160 1160
1161static int core_scsi3_check_implict_release( 1161static int core_scsi3_check_implicit_release(
1162 struct se_device *dev, 1162 struct se_device *dev,
1163 struct t10_pr_registration *pr_reg) 1163 struct t10_pr_registration *pr_reg)
1164{ 1164{
@@ -1174,7 +1174,7 @@ static int core_scsi3_check_implict_release(
1174 } 1174 }
1175 if (pr_res_holder == pr_reg) { 1175 if (pr_res_holder == pr_reg) {
1176 /* 1176 /*
1177 * Perform an implict RELEASE if the registration that 1177 * Perform an implicit RELEASE if the registration that
1178 * is being released is holding the reservation. 1178 * is being released is holding the reservation.
1179 * 1179 *
1180 * From spc4r17, section 5.7.11.1: 1180 * From spc4r17, section 5.7.11.1:
@@ -1192,7 +1192,7 @@ static int core_scsi3_check_implict_release(
1192 * For 'All Registrants' reservation types, all existing 1192 * For 'All Registrants' reservation types, all existing
1193 * registrations are still processed as reservation holders 1193 * registrations are still processed as reservation holders
1194 * in core_scsi3_pr_seq_non_holder() after the initial 1194 * in core_scsi3_pr_seq_non_holder() after the initial
1195 * reservation holder is implictly released here. 1195 * reservation holder is implicitly released here.
1196 */ 1196 */
1197 } else if (pr_reg->pr_reg_all_tg_pt && 1197 } else if (pr_reg->pr_reg_all_tg_pt &&
1198 (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname, 1198 (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
@@ -2125,7 +2125,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
2125 /* 2125 /*
2126 * sa_res_key=0 Unregister Reservation Key for registered I_T Nexus. 2126 * sa_res_key=0 Unregister Reservation Key for registered I_T Nexus.
2127 */ 2127 */
2128 pr_holder = core_scsi3_check_implict_release( 2128 pr_holder = core_scsi3_check_implicit_release(
2129 cmd->se_dev, pr_reg); 2129 cmd->se_dev, pr_reg);
2130 if (pr_holder < 0) { 2130 if (pr_holder < 0) {
2131 ret = TCM_RESERVATION_CONFLICT; 2131 ret = TCM_RESERVATION_CONFLICT;
@@ -2402,7 +2402,7 @@ static void __core_scsi3_complete_pro_release(
2402 struct se_device *dev, 2402 struct se_device *dev,
2403 struct se_node_acl *se_nacl, 2403 struct se_node_acl *se_nacl,
2404 struct t10_pr_registration *pr_reg, 2404 struct t10_pr_registration *pr_reg,
2405 int explict) 2405 int explicit)
2406{ 2406{
2407 struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo; 2407 struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
2408 char i_buf[PR_REG_ISID_ID_LEN]; 2408 char i_buf[PR_REG_ISID_ID_LEN];
@@ -2416,7 +2416,7 @@ static void __core_scsi3_complete_pro_release(
2416 2416
2417 pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared" 2417 pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
2418 " reservation holder TYPE: %s ALL_TG_PT: %d\n", 2418 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2419 tfo->get_fabric_name(), (explict) ? "explict" : "implict", 2419 tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit",
2420 core_scsi3_pr_dump_type(pr_reg->pr_res_type), 2420 core_scsi3_pr_dump_type(pr_reg->pr_res_type),
2421 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 2421 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2422 pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n", 2422 pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
@@ -2692,7 +2692,7 @@ static void __core_scsi3_complete_pro_preempt(
2692 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 2692 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
2693 core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); 2693 core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
2694 /* 2694 /*
2695 * Do an implict RELEASE of the existing reservation. 2695 * Do an implicit RELEASE of the existing reservation.
2696 */ 2696 */
2697 if (dev->dev_pr_res_holder) 2697 if (dev->dev_pr_res_holder)
2698 __core_scsi3_complete_pro_release(dev, nacl, 2698 __core_scsi3_complete_pro_release(dev, nacl,
@@ -2845,7 +2845,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
2845 * 5.7.11.4 Preempting, Table 52 and Figure 7. 2845 * 5.7.11.4 Preempting, Table 52 and Figure 7.
2846 * 2846 *
2847 * For a ZERO SA Reservation key, release 2847 * For a ZERO SA Reservation key, release
2848 * all other registrations and do an implict 2848 * all other registrations and do an implicit
2849 * release of active persistent reservation. 2849 * release of active persistent reservation.
2850 * 2850 *
2851 * For a non-ZERO SA Reservation key, only 2851 * For a non-ZERO SA Reservation key, only
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 131327ac7f5b..4ffe5f2ec0e9 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -27,7 +27,6 @@
27#include <linux/string.h> 27#include <linux/string.h>
28#include <linux/parser.h> 28#include <linux/parser.h>
29#include <linux/timer.h> 29#include <linux/timer.h>
30#include <linux/blkdev.h>
31#include <linux/slab.h> 30#include <linux/slab.h>
32#include <linux/spinlock.h> 31#include <linux/spinlock.h>
33#include <scsi/scsi.h> 32#include <scsi/scsi.h>
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index d9b92b2c524d..52ae54e60105 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -105,12 +105,22 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
105 buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff; 105 buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
106 buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff; 106 buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
107 buf[11] = dev->dev_attrib.block_size & 0xff; 107 buf[11] = dev->dev_attrib.block_size & 0xff;
108
109 if (dev->transport->get_lbppbe)
110 buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
111
112 if (dev->transport->get_alignment_offset_lbas) {
113 u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
114 buf[14] = (lalba >> 8) & 0x3f;
115 buf[15] = lalba & 0xff;
116 }
117
108 /* 118 /*
109 * Set Thin Provisioning Enable bit following sbc3r22 in section 119 * Set Thin Provisioning Enable bit following sbc3r22 in section
110 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. 120 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
111 */ 121 */
112 if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) 122 if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
113 buf[14] = 0x80; 123 buf[14] |= 0x80;
114 124
115 rbuf = transport_kmap_data_sg(cmd); 125 rbuf = transport_kmap_data_sg(cmd);
116 if (rbuf) { 126 if (rbuf) {
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 074539558a54..021c3f4a4f00 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -48,7 +48,7 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
48 buf[5] = 0x80; 48 buf[5] = 0x80;
49 49
50 /* 50 /*
51 * Set TPGS field for explict and/or implict ALUA access type 51 * Set TPGS field for explicit and/or implicit ALUA access type
52 * and opteration. 52 * and opteration.
53 * 53 *
54 * See spc4r17 section 6.4.2 Table 135 54 * See spc4r17 section 6.4.2 Table 135
@@ -452,6 +452,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
452 struct se_device *dev = cmd->se_dev; 452 struct se_device *dev = cmd->se_dev;
453 u32 max_sectors; 453 u32 max_sectors;
454 int have_tp = 0; 454 int have_tp = 0;
455 int opt, min;
455 456
456 /* 457 /*
457 * Following spc3r22 section 6.5.3 Block Limits VPD page, when 458 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
@@ -475,7 +476,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
475 /* 476 /*
476 * Set OPTIMAL TRANSFER LENGTH GRANULARITY 477 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
477 */ 478 */
478 put_unaligned_be16(1, &buf[6]); 479 if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
480 put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
481 else
482 put_unaligned_be16(1, &buf[6]);
479 483
480 /* 484 /*
481 * Set MAXIMUM TRANSFER LENGTH 485 * Set MAXIMUM TRANSFER LENGTH
@@ -487,7 +491,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
487 /* 491 /*
488 * Set OPTIMAL TRANSFER LENGTH 492 * Set OPTIMAL TRANSFER LENGTH
489 */ 493 */
490 put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]); 494 if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
495 put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
496 else
497 put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
491 498
492 /* 499 /*
493 * Exit now if we don't support TP. 500 * Exit now if we don't support TP.
@@ -1250,7 +1257,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
1250 *size = (cdb[3] << 8) + cdb[4]; 1257 *size = (cdb[3] << 8) + cdb[4];
1251 1258
1252 /* 1259 /*
1253 * Do implict HEAD_OF_QUEUE processing for INQUIRY. 1260 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
1254 * See spc4r17 section 5.3 1261 * See spc4r17 section 5.3
1255 */ 1262 */
1256 cmd->sam_task_attr = MSG_HEAD_TAG; 1263 cmd->sam_task_attr = MSG_HEAD_TAG;
@@ -1284,7 +1291,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
1284 cmd->execute_cmd = spc_emulate_report_luns; 1291 cmd->execute_cmd = spc_emulate_report_luns;
1285 *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 1292 *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
1286 /* 1293 /*
1287 * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS 1294 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
1288 * See spc4r17 section 5.3 1295 * See spc4r17 section 5.3
1289 */ 1296 */
1290 cmd->sam_task_attr = MSG_HEAD_TAG; 1297 cmd->sam_task_attr = MSG_HEAD_TAG;
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 9c642e02cba1..03538994d2f7 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -32,7 +32,6 @@
32#include <linux/utsname.h> 32#include <linux/utsname.h>
33#include <linux/proc_fs.h> 33#include <linux/proc_fs.h>
34#include <linux/seq_file.h> 34#include <linux/seq_file.h>
35#include <linux/blkdev.h>
36#include <linux/configfs.h> 35#include <linux/configfs.h>
37#include <scsi/scsi.h> 36#include <scsi/scsi.h>
38#include <scsi/scsi_device.h> 37#include <scsi/scsi_device.h>
@@ -214,7 +213,8 @@ static ssize_t target_stat_scsi_tgt_dev_show_attr_resets(
214 struct se_device *dev = 213 struct se_device *dev =
215 container_of(sgrps, struct se_device, dev_stat_grps); 214 container_of(sgrps, struct se_device, dev_stat_grps);
216 215
217 return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); 216 return snprintf(page, PAGE_SIZE, "%lu\n",
217 atomic_long_read(&dev->num_resets));
218} 218}
219DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets); 219DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets);
220 220
@@ -397,8 +397,8 @@ static ssize_t target_stat_scsi_lu_show_attr_num_cmds(
397 container_of(sgrps, struct se_device, dev_stat_grps); 397 container_of(sgrps, struct se_device, dev_stat_grps);
398 398
399 /* scsiLuNumCommands */ 399 /* scsiLuNumCommands */
400 return snprintf(page, PAGE_SIZE, "%llu\n", 400 return snprintf(page, PAGE_SIZE, "%lu\n",
401 (unsigned long long)dev->num_cmds); 401 atomic_long_read(&dev->num_cmds));
402} 402}
403DEV_STAT_SCSI_LU_ATTR_RO(num_cmds); 403DEV_STAT_SCSI_LU_ATTR_RO(num_cmds);
404 404
@@ -409,7 +409,8 @@ static ssize_t target_stat_scsi_lu_show_attr_read_mbytes(
409 container_of(sgrps, struct se_device, dev_stat_grps); 409 container_of(sgrps, struct se_device, dev_stat_grps);
410 410
411 /* scsiLuReadMegaBytes */ 411 /* scsiLuReadMegaBytes */
412 return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20)); 412 return snprintf(page, PAGE_SIZE, "%lu\n",
413 atomic_long_read(&dev->read_bytes) >> 20);
413} 414}
414DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes); 415DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes);
415 416
@@ -420,7 +421,8 @@ static ssize_t target_stat_scsi_lu_show_attr_write_mbytes(
420 container_of(sgrps, struct se_device, dev_stat_grps); 421 container_of(sgrps, struct se_device, dev_stat_grps);
421 422
422 /* scsiLuWrittenMegaBytes */ 423 /* scsiLuWrittenMegaBytes */
423 return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20)); 424 return snprintf(page, PAGE_SIZE, "%lu\n",
425 atomic_long_read(&dev->write_bytes) >> 20);
424} 426}
425DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes); 427DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes);
426 428
@@ -431,7 +433,7 @@ static ssize_t target_stat_scsi_lu_show_attr_resets(
431 container_of(sgrps, struct se_device, dev_stat_grps); 433 container_of(sgrps, struct se_device, dev_stat_grps);
432 434
433 /* scsiLuInResets */ 435 /* scsiLuInResets */
434 return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); 436 return snprintf(page, PAGE_SIZE, "%lu\n", atomic_long_read(&dev->num_resets));
435} 437}
436DEV_STAT_SCSI_LU_ATTR_RO(resets); 438DEV_STAT_SCSI_LU_ATTR_RO(resets);
437 439
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 250009909d49..70c638f730af 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -386,9 +386,7 @@ int core_tmr_lun_reset(
386 pr_debug("LUN_RESET: SCSI-2 Released reservation\n"); 386 pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
387 } 387 }
388 388
389 spin_lock_irq(&dev->stats_lock); 389 atomic_long_inc(&dev->num_resets);
390 dev->num_resets++;
391 spin_unlock_irq(&dev->stats_lock);
392 390
393 pr_debug("LUN_RESET: %s for [%s] Complete\n", 391 pr_debug("LUN_RESET: %s for [%s] Complete\n",
394 (preempt_and_abort_list) ? "Preempt" : "TMR", 392 (preempt_and_abort_list) ? "Preempt" : "TMR",
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index b9a6ec0aa5fe..f697f8baec54 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -116,6 +116,7 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
116 116
117 return acl; 117 return acl;
118} 118}
119EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
119 120
120/* core_tpg_add_node_to_devs(): 121/* core_tpg_add_node_to_devs():
121 * 122 *
@@ -633,6 +634,13 @@ int core_tpg_set_initiator_node_tag(
633} 634}
634EXPORT_SYMBOL(core_tpg_set_initiator_node_tag); 635EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
635 636
637static void core_tpg_lun_ref_release(struct percpu_ref *ref)
638{
639 struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
640
641 complete(&lun->lun_ref_comp);
642}
643
636static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) 644static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
637{ 645{
638 /* Set in core_dev_setup_virtual_lun0() */ 646 /* Set in core_dev_setup_virtual_lun0() */
@@ -646,15 +654,20 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
646 atomic_set(&lun->lun_acl_count, 0); 654 atomic_set(&lun->lun_acl_count, 0);
647 init_completion(&lun->lun_shutdown_comp); 655 init_completion(&lun->lun_shutdown_comp);
648 INIT_LIST_HEAD(&lun->lun_acl_list); 656 INIT_LIST_HEAD(&lun->lun_acl_list);
649 INIT_LIST_HEAD(&lun->lun_cmd_list);
650 spin_lock_init(&lun->lun_acl_lock); 657 spin_lock_init(&lun->lun_acl_lock);
651 spin_lock_init(&lun->lun_cmd_lock);
652 spin_lock_init(&lun->lun_sep_lock); 658 spin_lock_init(&lun->lun_sep_lock);
659 init_completion(&lun->lun_ref_comp);
653 660
654 ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); 661 ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
655 if (ret < 0) 662 if (ret < 0)
656 return ret; 663 return ret;
657 664
665 ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
666 if (ret < 0) {
667 percpu_ref_cancel_init(&lun->lun_ref);
668 return ret;
669 }
670
658 return 0; 671 return 0;
659} 672}
660 673
@@ -691,10 +704,9 @@ int core_tpg_register(
691 atomic_set(&lun->lun_acl_count, 0); 704 atomic_set(&lun->lun_acl_count, 0);
692 init_completion(&lun->lun_shutdown_comp); 705 init_completion(&lun->lun_shutdown_comp);
693 INIT_LIST_HEAD(&lun->lun_acl_list); 706 INIT_LIST_HEAD(&lun->lun_acl_list);
694 INIT_LIST_HEAD(&lun->lun_cmd_list);
695 spin_lock_init(&lun->lun_acl_lock); 707 spin_lock_init(&lun->lun_acl_lock);
696 spin_lock_init(&lun->lun_cmd_lock);
697 spin_lock_init(&lun->lun_sep_lock); 708 spin_lock_init(&lun->lun_sep_lock);
709 init_completion(&lun->lun_ref_comp);
698 } 710 }
699 711
700 se_tpg->se_tpg_type = se_tpg_type; 712 se_tpg->se_tpg_type = se_tpg_type;
@@ -815,10 +827,16 @@ int core_tpg_post_addlun(
815{ 827{
816 int ret; 828 int ret;
817 829
818 ret = core_dev_export(lun_ptr, tpg, lun); 830 ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
819 if (ret < 0) 831 if (ret < 0)
820 return ret; 832 return ret;
821 833
834 ret = core_dev_export(lun_ptr, tpg, lun);
835 if (ret < 0) {
836 percpu_ref_cancel_init(&lun->lun_ref);
837 return ret;
838 }
839
822 spin_lock(&tpg->tpg_lun_lock); 840 spin_lock(&tpg->tpg_lun_lock);
823 lun->lun_access = lun_access; 841 lun->lun_access = lun_access;
824 lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE; 842 lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
@@ -827,14 +845,6 @@ int core_tpg_post_addlun(
827 return 0; 845 return 0;
828} 846}
829 847
830static void core_tpg_shutdown_lun(
831 struct se_portal_group *tpg,
832 struct se_lun *lun)
833{
834 core_clear_lun_from_tpg(lun, tpg);
835 transport_clear_lun_from_sessions(lun);
836}
837
838struct se_lun *core_tpg_pre_dellun( 848struct se_lun *core_tpg_pre_dellun(
839 struct se_portal_group *tpg, 849 struct se_portal_group *tpg,
840 u32 unpacked_lun) 850 u32 unpacked_lun)
@@ -869,7 +879,8 @@ int core_tpg_post_dellun(
869 struct se_portal_group *tpg, 879 struct se_portal_group *tpg,
870 struct se_lun *lun) 880 struct se_lun *lun)
871{ 881{
872 core_tpg_shutdown_lun(tpg, lun); 882 core_clear_lun_from_tpg(lun, tpg);
883 transport_clear_lun_ref(lun);
873 884
874 core_dev_unexport(lun->lun_se_dev, tpg, lun); 885 core_dev_unexport(lun->lun_se_dev, tpg, lun);
875 886
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 81e945eefbbd..91953da0f623 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -28,7 +28,6 @@
28#include <linux/string.h> 28#include <linux/string.h>
29#include <linux/timer.h> 29#include <linux/timer.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/blkdev.h>
32#include <linux/spinlock.h> 31#include <linux/spinlock.h>
33#include <linux/kthread.h> 32#include <linux/kthread.h>
34#include <linux/in.h> 33#include <linux/in.h>
@@ -473,7 +472,7 @@ void transport_deregister_session(struct se_session *se_sess)
473 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 472 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
474 se_tpg->se_tpg_tfo->get_fabric_name()); 473 se_tpg->se_tpg_tfo->get_fabric_name());
475 /* 474 /*
476 * If last kref is dropping now for an explict NodeACL, awake sleeping 475 * If last kref is dropping now for an explicit NodeACL, awake sleeping
477 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 476 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
478 * removal context. 477 * removal context.
479 */ 478 */
@@ -515,23 +514,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
515 if (write_pending) 514 if (write_pending)
516 cmd->t_state = TRANSPORT_WRITE_PENDING; 515 cmd->t_state = TRANSPORT_WRITE_PENDING;
517 516
518 /*
519 * Determine if IOCTL context caller in requesting the stopping of this
520 * command for LUN shutdown purposes.
521 */
522 if (cmd->transport_state & CMD_T_LUN_STOP) {
523 pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
524 __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
525
526 cmd->transport_state &= ~CMD_T_ACTIVE;
527 if (remove_from_lists)
528 target_remove_from_state_list(cmd);
529 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
530
531 complete(&cmd->transport_lun_stop_comp);
532 return 1;
533 }
534
535 if (remove_from_lists) { 517 if (remove_from_lists) {
536 target_remove_from_state_list(cmd); 518 target_remove_from_state_list(cmd);
537 519
@@ -585,15 +567,11 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
585static void transport_lun_remove_cmd(struct se_cmd *cmd) 567static void transport_lun_remove_cmd(struct se_cmd *cmd)
586{ 568{
587 struct se_lun *lun = cmd->se_lun; 569 struct se_lun *lun = cmd->se_lun;
588 unsigned long flags;
589 570
590 if (!lun) 571 if (!lun || !cmd->lun_ref_active)
591 return; 572 return;
592 573
593 spin_lock_irqsave(&lun->lun_cmd_lock, flags); 574 percpu_ref_put(&lun->lun_ref);
594 if (!list_empty(&cmd->se_lun_node))
595 list_del_init(&cmd->se_lun_node);
596 spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
597} 575}
598 576
599void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 577void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
@@ -668,7 +646,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
668 cmd->transport_state |= CMD_T_FAILED; 646 cmd->transport_state |= CMD_T_FAILED;
669 647
670 /* 648 /*
671 * Check for case where an explict ABORT_TASK has been received 649 * Check for case where an explicit ABORT_TASK has been received
672 * and transport_wait_for_tasks() will be waiting for completion.. 650 * and transport_wait_for_tasks() will be waiting for completion..
673 */ 651 */
674 if (cmd->transport_state & CMD_T_ABORTED && 652 if (cmd->transport_state & CMD_T_ABORTED &&
@@ -1092,13 +1070,10 @@ void transport_init_se_cmd(
1092 int task_attr, 1070 int task_attr,
1093 unsigned char *sense_buffer) 1071 unsigned char *sense_buffer)
1094{ 1072{
1095 INIT_LIST_HEAD(&cmd->se_lun_node);
1096 INIT_LIST_HEAD(&cmd->se_delayed_node); 1073 INIT_LIST_HEAD(&cmd->se_delayed_node);
1097 INIT_LIST_HEAD(&cmd->se_qf_node); 1074 INIT_LIST_HEAD(&cmd->se_qf_node);
1098 INIT_LIST_HEAD(&cmd->se_cmd_list); 1075 INIT_LIST_HEAD(&cmd->se_cmd_list);
1099 INIT_LIST_HEAD(&cmd->state_list); 1076 INIT_LIST_HEAD(&cmd->state_list);
1100 init_completion(&cmd->transport_lun_fe_stop_comp);
1101 init_completion(&cmd->transport_lun_stop_comp);
1102 init_completion(&cmd->t_transport_stop_comp); 1077 init_completion(&cmd->t_transport_stop_comp);
1103 init_completion(&cmd->cmd_wait_comp); 1078 init_completion(&cmd->cmd_wait_comp);
1104 init_completion(&cmd->task_stop_comp); 1079 init_completion(&cmd->task_stop_comp);
@@ -1719,29 +1694,14 @@ void target_execute_cmd(struct se_cmd *cmd)
1719 /* 1694 /*
1720 * If the received CDB has aleady been aborted stop processing it here. 1695 * If the received CDB has aleady been aborted stop processing it here.
1721 */ 1696 */
1722 if (transport_check_aborted_status(cmd, 1)) { 1697 if (transport_check_aborted_status(cmd, 1))
1723 complete(&cmd->transport_lun_stop_comp);
1724 return; 1698 return;
1725 }
1726 1699
1727 /* 1700 /*
1728 * Determine if IOCTL context caller in requesting the stopping of this
1729 * command for LUN shutdown purposes.
1730 */
1731 spin_lock_irq(&cmd->t_state_lock);
1732 if (cmd->transport_state & CMD_T_LUN_STOP) {
1733 pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
1734 __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
1735
1736 cmd->transport_state &= ~CMD_T_ACTIVE;
1737 spin_unlock_irq(&cmd->t_state_lock);
1738 complete(&cmd->transport_lun_stop_comp);
1739 return;
1740 }
1741 /*
1742 * Determine if frontend context caller is requesting the stopping of 1701 * Determine if frontend context caller is requesting the stopping of
1743 * this command for frontend exceptions. 1702 * this command for frontend exceptions.
1744 */ 1703 */
1704 spin_lock_irq(&cmd->t_state_lock);
1745 if (cmd->transport_state & CMD_T_STOP) { 1705 if (cmd->transport_state & CMD_T_STOP) {
1746 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", 1706 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
1747 __func__, __LINE__, 1707 __func__, __LINE__,
@@ -2404,164 +2364,23 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
2404} 2364}
2405EXPORT_SYMBOL(target_wait_for_sess_cmds); 2365EXPORT_SYMBOL(target_wait_for_sess_cmds);
2406 2366
2407/* transport_lun_wait_for_tasks(): 2367static int transport_clear_lun_ref_thread(void *p)
2408 *
2409 * Called from ConfigFS context to stop the passed struct se_cmd to allow
2410 * an struct se_lun to be successfully shutdown.
2411 */
2412static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
2413{
2414 unsigned long flags;
2415 int ret = 0;
2416
2417 /*
2418 * If the frontend has already requested this struct se_cmd to
2419 * be stopped, we can safely ignore this struct se_cmd.
2420 */
2421 spin_lock_irqsave(&cmd->t_state_lock, flags);
2422 if (cmd->transport_state & CMD_T_STOP) {
2423 cmd->transport_state &= ~CMD_T_LUN_STOP;
2424
2425 pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
2426 cmd->se_tfo->get_task_tag(cmd));
2427 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2428 transport_cmd_check_stop(cmd, false, false);
2429 return -EPERM;
2430 }
2431 cmd->transport_state |= CMD_T_LUN_FE_STOP;
2432 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2433
2434 // XXX: audit task_flags checks.
2435 spin_lock_irqsave(&cmd->t_state_lock, flags);
2436 if ((cmd->transport_state & CMD_T_BUSY) &&
2437 (cmd->transport_state & CMD_T_SENT)) {
2438 if (!target_stop_cmd(cmd, &flags))
2439 ret++;
2440 }
2441 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2442
2443 pr_debug("ConfigFS: cmd: %p stop tasks ret:"
2444 " %d\n", cmd, ret);
2445 if (!ret) {
2446 pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
2447 cmd->se_tfo->get_task_tag(cmd));
2448 wait_for_completion(&cmd->transport_lun_stop_comp);
2449 pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
2450 cmd->se_tfo->get_task_tag(cmd));
2451 }
2452
2453 return 0;
2454}
2455
2456static void __transport_clear_lun_from_sessions(struct se_lun *lun)
2457{
2458 struct se_cmd *cmd = NULL;
2459 unsigned long lun_flags, cmd_flags;
2460 /*
2461 * Do exception processing and return CHECK_CONDITION status to the
2462 * Initiator Port.
2463 */
2464 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
2465 while (!list_empty(&lun->lun_cmd_list)) {
2466 cmd = list_first_entry(&lun->lun_cmd_list,
2467 struct se_cmd, se_lun_node);
2468 list_del_init(&cmd->se_lun_node);
2469
2470 spin_lock(&cmd->t_state_lock);
2471 pr_debug("SE_LUN[%d] - Setting cmd->transport"
2472 "_lun_stop for ITT: 0x%08x\n",
2473 cmd->se_lun->unpacked_lun,
2474 cmd->se_tfo->get_task_tag(cmd));
2475 cmd->transport_state |= CMD_T_LUN_STOP;
2476 spin_unlock(&cmd->t_state_lock);
2477
2478 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
2479
2480 if (!cmd->se_lun) {
2481 pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
2482 cmd->se_tfo->get_task_tag(cmd),
2483 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
2484 BUG();
2485 }
2486 /*
2487 * If the Storage engine still owns the iscsi_cmd_t, determine
2488 * and/or stop its context.
2489 */
2490 pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
2491 "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
2492 cmd->se_tfo->get_task_tag(cmd));
2493
2494 if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
2495 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
2496 continue;
2497 }
2498
2499 pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
2500 "_wait_for_tasks(): SUCCESS\n",
2501 cmd->se_lun->unpacked_lun,
2502 cmd->se_tfo->get_task_tag(cmd));
2503
2504 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
2505 if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
2506 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
2507 goto check_cond;
2508 }
2509 cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
2510 target_remove_from_state_list(cmd);
2511 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
2512
2513 /*
2514 * The Storage engine stopped this struct se_cmd before it was
2515 * send to the fabric frontend for delivery back to the
2516 * Initiator Node. Return this SCSI CDB back with an
2517 * CHECK_CONDITION status.
2518 */
2519check_cond:
2520 transport_send_check_condition_and_sense(cmd,
2521 TCM_NON_EXISTENT_LUN, 0);
2522 /*
2523 * If the fabric frontend is waiting for this iscsi_cmd_t to
2524 * be released, notify the waiting thread now that LU has
2525 * finished accessing it.
2526 */
2527 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
2528 if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
2529 pr_debug("SE_LUN[%d] - Detected FE stop for"
2530 " struct se_cmd: %p ITT: 0x%08x\n",
2531 lun->unpacked_lun,
2532 cmd, cmd->se_tfo->get_task_tag(cmd));
2533
2534 spin_unlock_irqrestore(&cmd->t_state_lock,
2535 cmd_flags);
2536 transport_cmd_check_stop(cmd, false, false);
2537 complete(&cmd->transport_lun_fe_stop_comp);
2538 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
2539 continue;
2540 }
2541 pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
2542 lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
2543
2544 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
2545 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
2546 }
2547 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
2548}
2549
2550static int transport_clear_lun_thread(void *p)
2551{ 2368{
2552 struct se_lun *lun = p; 2369 struct se_lun *lun = p;
2553 2370
2554 __transport_clear_lun_from_sessions(lun); 2371 percpu_ref_kill(&lun->lun_ref);
2372
2373 wait_for_completion(&lun->lun_ref_comp);
2555 complete(&lun->lun_shutdown_comp); 2374 complete(&lun->lun_shutdown_comp);
2556 2375
2557 return 0; 2376 return 0;
2558} 2377}
2559 2378
2560int transport_clear_lun_from_sessions(struct se_lun *lun) 2379int transport_clear_lun_ref(struct se_lun *lun)
2561{ 2380{
2562 struct task_struct *kt; 2381 struct task_struct *kt;
2563 2382
2564 kt = kthread_run(transport_clear_lun_thread, lun, 2383 kt = kthread_run(transport_clear_lun_ref_thread, lun,
2565 "tcm_cl_%u", lun->unpacked_lun); 2384 "tcm_cl_%u", lun->unpacked_lun);
2566 if (IS_ERR(kt)) { 2385 if (IS_ERR(kt)) {
2567 pr_err("Unable to start clear_lun thread\n"); 2386 pr_err("Unable to start clear_lun thread\n");
@@ -2595,43 +2414,6 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
2595 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2414 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2596 return false; 2415 return false;
2597 } 2416 }
2598 /*
2599 * If we are already stopped due to an external event (ie: LUN shutdown)
2600 * sleep until the connection can have the passed struct se_cmd back.
2601 * The cmd->transport_lun_stopped_sem will be upped by
2602 * transport_clear_lun_from_sessions() once the ConfigFS context caller
2603 * has completed its operation on the struct se_cmd.
2604 */
2605 if (cmd->transport_state & CMD_T_LUN_STOP) {
2606 pr_debug("wait_for_tasks: Stopping"
2607 " wait_for_completion(&cmd->t_tasktransport_lun_fe"
2608 "_stop_comp); for ITT: 0x%08x\n",
2609 cmd->se_tfo->get_task_tag(cmd));
2610 /*
2611 * There is a special case for WRITES where a FE exception +
2612 * LUN shutdown means ConfigFS context is still sleeping on
2613 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
2614 * We go ahead and up transport_lun_stop_comp just to be sure
2615 * here.
2616 */
2617 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2618 complete(&cmd->transport_lun_stop_comp);
2619 wait_for_completion(&cmd->transport_lun_fe_stop_comp);
2620 spin_lock_irqsave(&cmd->t_state_lock, flags);
2621
2622 target_remove_from_state_list(cmd);
2623 /*
2624 * At this point, the frontend who was the originator of this
2625 * struct se_cmd, now owns the structure and can be released through
2626 * normal means below.
2627 */
2628 pr_debug("wait_for_tasks: Stopped"
2629 " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
2630 "stop_comp); for ITT: 0x%08x\n",
2631 cmd->se_tfo->get_task_tag(cmd));
2632
2633 cmd->transport_state &= ~CMD_T_LUN_STOP;
2634 }
2635 2417
2636 if (!(cmd->transport_state & CMD_T_ACTIVE)) { 2418 if (!(cmd->transport_state & CMD_T_ACTIVE)) {
2637 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2419 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -2910,6 +2692,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
2910 cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); 2692 cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
2911 2693
2912 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; 2694 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
2695 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
2913 trace_target_cmd_complete(cmd); 2696 trace_target_cmd_complete(cmd);
2914 cmd->se_tfo->queue_status(cmd); 2697 cmd->se_tfo->queue_status(cmd);
2915 2698
@@ -2938,6 +2721,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
2938 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 2721 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
2939 cmd->transport_state |= CMD_T_ABORTED; 2722 cmd->transport_state |= CMD_T_ABORTED;
2940 smp_mb__after_atomic_inc(); 2723 smp_mb__after_atomic_inc();
2724 return;
2941 } 2725 }
2942 } 2726 }
2943 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 2727 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index 0204952fe4d3..be912b36daae 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -19,7 +19,7 @@
19#define ASCQ_2AH_RESERVATIONS_RELEASED 0x04 19#define ASCQ_2AH_RESERVATIONS_RELEASED 0x04
20#define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05 20#define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05
21#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06 21#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06
22#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07 22#define ASCQ_2AH_IMPLICIT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
23#define ASCQ_2AH_PRIORITY_CHANGED 0x08 23#define ASCQ_2AH_PRIORITY_CHANGED 0x08
24 24
25#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09 25#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 474cd44fac14..6b88a9958f61 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -405,9 +405,6 @@ static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
405 struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd, 405 struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
406 struct xcopy_pt_cmd, se_cmd); 406 struct xcopy_pt_cmd, se_cmd);
407 407
408 if (xpt_cmd->remote_port)
409 kfree(se_cmd->se_lun);
410
411 kfree(xpt_cmd); 408 kfree(xpt_cmd);
412} 409}
413 410
@@ -572,22 +569,10 @@ static int target_xcopy_init_pt_lun(
572 return 0; 569 return 0;
573 } 570 }
574 571
575 pt_cmd->se_lun = kzalloc(sizeof(struct se_lun), GFP_KERNEL); 572 pt_cmd->se_lun = &se_dev->xcopy_lun;
576 if (!pt_cmd->se_lun) {
577 pr_err("Unable to allocate pt_cmd->se_lun\n");
578 return -ENOMEM;
579 }
580 init_completion(&pt_cmd->se_lun->lun_shutdown_comp);
581 INIT_LIST_HEAD(&pt_cmd->se_lun->lun_cmd_list);
582 INIT_LIST_HEAD(&pt_cmd->se_lun->lun_acl_list);
583 spin_lock_init(&pt_cmd->se_lun->lun_acl_lock);
584 spin_lock_init(&pt_cmd->se_lun->lun_cmd_lock);
585 spin_lock_init(&pt_cmd->se_lun->lun_sep_lock);
586
587 pt_cmd->se_dev = se_dev; 573 pt_cmd->se_dev = se_dev;
588 574
589 pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev); 575 pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev);
590 pt_cmd->se_lun->lun_se_dev = se_dev;
591 pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH; 576 pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
592 577
593 pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n", 578 pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n",
@@ -658,8 +643,6 @@ static int target_xcopy_setup_pt_cmd(
658 return 0; 643 return 0;
659 644
660out: 645out:
661 if (remote_port == true)
662 kfree(cmd->se_lun);
663 return ret; 646 return ret;
664} 647}
665 648
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 0dd54a44abcf..752863acecb8 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -22,6 +22,7 @@
22#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */ 22#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */
23#define FT_TPG_NAMELEN 32 /* max length of TPG name */ 23#define FT_TPG_NAMELEN 32 /* max length of TPG name */
24#define FT_LUN_NAMELEN 32 /* max length of LUN name */ 24#define FT_LUN_NAMELEN 32 /* max length of LUN name */
25#define TCM_FC_DEFAULT_TAGS 512 /* tags used for per-session preallocation */
25 26
26struct ft_transport_id { 27struct ft_transport_id {
27 __u8 format; 28 __u8 format;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 0e5a1caed176..479ec5621a4e 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -28,6 +28,7 @@
28#include <linux/configfs.h> 28#include <linux/configfs.h>
29#include <linux/ctype.h> 29#include <linux/ctype.h>
30#include <linux/hash.h> 30#include <linux/hash.h>
31#include <linux/percpu_ida.h>
31#include <asm/unaligned.h> 32#include <asm/unaligned.h>
32#include <scsi/scsi.h> 33#include <scsi/scsi.h>
33#include <scsi/scsi_host.h> 34#include <scsi/scsi_host.h>
@@ -89,16 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd)
89{ 90{
90 struct fc_frame *fp; 91 struct fc_frame *fp;
91 struct fc_lport *lport; 92 struct fc_lport *lport;
93 struct se_session *se_sess;
92 94
93 if (!cmd) 95 if (!cmd)
94 return; 96 return;
97 se_sess = cmd->sess->se_sess;
95 fp = cmd->req_frame; 98 fp = cmd->req_frame;
96 lport = fr_dev(fp); 99 lport = fr_dev(fp);
97 if (fr_seq(fp)) 100 if (fr_seq(fp))
98 lport->tt.seq_release(fr_seq(fp)); 101 lport->tt.seq_release(fr_seq(fp));
99 fc_frame_free(fp); 102 fc_frame_free(fp);
103 percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
100 ft_sess_put(cmd->sess); /* undo get from lookup at recv */ 104 ft_sess_put(cmd->sess); /* undo get from lookup at recv */
101 kfree(cmd);
102} 105}
103 106
104void ft_release_cmd(struct se_cmd *se_cmd) 107void ft_release_cmd(struct se_cmd *se_cmd)
@@ -432,14 +435,21 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
432{ 435{
433 struct ft_cmd *cmd; 436 struct ft_cmd *cmd;
434 struct fc_lport *lport = sess->tport->lport; 437 struct fc_lport *lport = sess->tport->lport;
438 struct se_session *se_sess = sess->se_sess;
439 int tag;
435 440
436 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); 441 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
437 if (!cmd) 442 if (tag < 0)
438 goto busy; 443 goto busy;
444
445 cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag];
446 memset(cmd, 0, sizeof(struct ft_cmd));
447
448 cmd->se_cmd.map_tag = tag;
439 cmd->sess = sess; 449 cmd->sess = sess;
440 cmd->seq = lport->tt.seq_assign(lport, fp); 450 cmd->seq = lport->tt.seq_assign(lport, fp);
441 if (!cmd->seq) { 451 if (!cmd->seq) {
442 kfree(cmd); 452 percpu_ida_free(&se_sess->sess_tag_pool, tag);
443 goto busy; 453 goto busy;
444 } 454 }
445 cmd->req_frame = fp; /* hold frame during cmd */ 455 cmd->req_frame = fp; /* hold frame during cmd */
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 4e0050840a72..c6932fb53a8d 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -571,16 +571,16 @@ int ft_register_configfs(void)
571 /* 571 /*
572 * Setup default attribute lists for various fabric->tf_cit_tmpl 572 * Setup default attribute lists for various fabric->tf_cit_tmpl
573 */ 573 */
574 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs; 574 fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
575 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL; 575 fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
576 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; 576 fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
577 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; 577 fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
578 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; 578 fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
579 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = 579 fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs =
580 ft_nacl_base_attrs; 580 ft_nacl_base_attrs;
581 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 581 fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
582 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 582 fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
583 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; 583 fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
584 /* 584 /*
585 * register the fabric for use within TCM 585 * register the fabric for use within TCM
586 */ 586 */
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 4859505ae2ed..ae52c08dad09 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -210,7 +210,8 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
210 if (!sess) 210 if (!sess)
211 return NULL; 211 return NULL;
212 212
213 sess->se_sess = transport_init_session(); 213 sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
214 sizeof(struct ft_cmd));
214 if (IS_ERR(sess->se_sess)) { 215 if (IS_ERR(sess->se_sess)) {
215 kfree(sess); 216 kfree(sess);
216 return NULL; 217 return NULL;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 03a567199bbe..f1d511a9475b 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1608,15 +1608,17 @@ exit:
1608EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name); 1608EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name);
1609 1609
1610#ifdef CONFIG_NET 1610#ifdef CONFIG_NET
1611static const struct genl_multicast_group thermal_event_mcgrps[] = {
1612 { .name = THERMAL_GENL_MCAST_GROUP_NAME, },
1613};
1614
1611static struct genl_family thermal_event_genl_family = { 1615static struct genl_family thermal_event_genl_family = {
1612 .id = GENL_ID_GENERATE, 1616 .id = GENL_ID_GENERATE,
1613 .name = THERMAL_GENL_FAMILY_NAME, 1617 .name = THERMAL_GENL_FAMILY_NAME,
1614 .version = THERMAL_GENL_VERSION, 1618 .version = THERMAL_GENL_VERSION,
1615 .maxattr = THERMAL_GENL_ATTR_MAX, 1619 .maxattr = THERMAL_GENL_ATTR_MAX,
1616}; 1620 .mcgrps = thermal_event_mcgrps,
1617 1621 .n_mcgrps = ARRAY_SIZE(thermal_event_mcgrps),
1618static struct genl_multicast_group thermal_event_mcgrp = {
1619 .name = THERMAL_GENL_MCAST_GROUP_NAME,
1620}; 1622};
1621 1623
1622int thermal_generate_netlink_event(struct thermal_zone_device *tz, 1624int thermal_generate_netlink_event(struct thermal_zone_device *tz,
@@ -1677,7 +1679,8 @@ int thermal_generate_netlink_event(struct thermal_zone_device *tz,
1677 return result; 1679 return result;
1678 } 1680 }
1679 1681
1680 result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC); 1682 result = genlmsg_multicast(&thermal_event_genl_family, skb, 0,
1683 0, GFP_ATOMIC);
1681 if (result) 1684 if (result)
1682 dev_err(&tz->device, "Failed to send netlink event:%d", result); 1685 dev_err(&tz->device, "Failed to send netlink event:%d", result);
1683 1686
@@ -1687,17 +1690,7 @@ EXPORT_SYMBOL_GPL(thermal_generate_netlink_event);
1687 1690
1688static int genetlink_init(void) 1691static int genetlink_init(void)
1689{ 1692{
1690 int result; 1693 return genl_register_family(&thermal_event_genl_family);
1691
1692 result = genl_register_family(&thermal_event_genl_family);
1693 if (result)
1694 return result;
1695
1696 result = genl_register_mc_group(&thermal_event_genl_family,
1697 &thermal_event_mcgrp);
1698 if (result)
1699 genl_unregister_family(&thermal_event_genl_family);
1700 return result;
1701} 1694}
1702 1695
1703static void genetlink_exit(void) 1696static void genetlink_exit(void)
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 2b86f8e0fb58..71630a2af42c 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1855,6 +1855,9 @@ static struct console sercons = {
1855 */ 1855 */
1856static int __init amiserial_console_init(void) 1856static int __init amiserial_console_init(void)
1857{ 1857{
1858 if (!MACH_IS_AMIGA)
1859 return -ENODEV;
1860
1858 register_console(&sercons); 1861 register_console(&sercons);
1859 return 0; 1862 return 0;
1860} 1863}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 7cdd1eb9406c..268b62768f2b 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -768,7 +768,7 @@ static size_t __process_echoes(struct tty_struct *tty)
768 * data at the tail to prevent a subsequent overrun */ 768 * data at the tail to prevent a subsequent overrun */
769 while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) { 769 while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
770 if (echo_buf(ldata, tail) == ECHO_OP_START) { 770 if (echo_buf(ldata, tail) == ECHO_OP_START) {
771 if (echo_buf(ldata, tail) == ECHO_OP_ERASE_TAB) 771 if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB)
772 tail += 3; 772 tail += 3;
773 else 773 else
774 tail += 2; 774 tail += 2;
@@ -810,7 +810,8 @@ static void process_echoes(struct tty_struct *tty)
810 struct n_tty_data *ldata = tty->disc_data; 810 struct n_tty_data *ldata = tty->disc_data;
811 size_t echoed; 811 size_t echoed;
812 812
813 if (!L_ECHO(tty) || ldata->echo_commit == ldata->echo_tail) 813 if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
814 ldata->echo_commit == ldata->echo_tail)
814 return; 815 return;
815 816
816 mutex_lock(&ldata->output_lock); 817 mutex_lock(&ldata->output_lock);
@@ -825,7 +826,8 @@ static void flush_echoes(struct tty_struct *tty)
825{ 826{
826 struct n_tty_data *ldata = tty->disc_data; 827 struct n_tty_data *ldata = tty->disc_data;
827 828
828 if (!L_ECHO(tty) || ldata->echo_commit == ldata->echo_head) 829 if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
830 ldata->echo_commit == ldata->echo_head)
829 return; 831 return;
830 832
831 mutex_lock(&ldata->output_lock); 833 mutex_lock(&ldata->output_lock);
@@ -1998,7 +2000,10 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
1998 found = 1; 2000 found = 1;
1999 2001
2000 size = N_TTY_BUF_SIZE - tail; 2002 size = N_TTY_BUF_SIZE - tail;
2001 n = (found + eol + size) & (N_TTY_BUF_SIZE - 1); 2003 n = eol - tail;
2004 if (n > 4096)
2005 n += 4096;
2006 n += found;
2002 c = n; 2007 c = n;
2003 2008
2004 if (found && read_buf(ldata, eol) == __DISABLED_CHAR) { 2009 if (found && read_buf(ldata, eol) == __DISABLED_CHAR) {
@@ -2243,18 +2248,19 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
2243 if (time) 2248 if (time)
2244 timeout = time; 2249 timeout = time;
2245 } 2250 }
2246 mutex_unlock(&ldata->atomic_read_lock); 2251 n_tty_set_room(tty);
2247 remove_wait_queue(&tty->read_wait, &wait); 2252 up_read(&tty->termios_rwsem);
2248 2253
2254 remove_wait_queue(&tty->read_wait, &wait);
2249 if (!waitqueue_active(&tty->read_wait)) 2255 if (!waitqueue_active(&tty->read_wait))
2250 ldata->minimum_to_wake = minimum; 2256 ldata->minimum_to_wake = minimum;
2251 2257
2258 mutex_unlock(&ldata->atomic_read_lock);
2259
2252 __set_current_state(TASK_RUNNING); 2260 __set_current_state(TASK_RUNNING);
2253 if (b - buf) 2261 if (b - buf)
2254 retval = b - buf; 2262 retval = b - buf;
2255 2263
2256 n_tty_set_room(tty);
2257 up_read(&tty->termios_rwsem);
2258 return retval; 2264 return retval;
2259} 2265}
2260 2266
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index f3b306efaa59..23329918f229 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -41,7 +41,7 @@ config SERIAL_8250_DEPRECATED_OPTIONS
41 accept kernel parameters in both forms like 8250_core.nr_uarts=4 and 41 accept kernel parameters in both forms like 8250_core.nr_uarts=4 and
42 8250.nr_uarts=4. We now renamed the module back to 8250, but if 42 8250.nr_uarts=4. We now renamed the module back to 8250, but if
43 anybody noticed in 3.7 and changed their userspace we still have to 43 anybody noticed in 3.7 and changed their userspace we still have to
44 keep the 8350_core.* options around until they revert the changes 44 keep the 8250_core.* options around until they revert the changes
45 they already did. 45 they already did.
46 46
47 If 8250 is built as a module, this adds 8250_core alias instead. 47 If 8250 is built as a module, this adds 8250_core alias instead.
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 481b781b26e3..e9d420ff3931 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -2052,6 +2052,9 @@ static int __init pmz_console_init(void)
2052 /* Probe ports */ 2052 /* Probe ports */
2053 pmz_probe(); 2053 pmz_probe();
2054 2054
2055 if (pmz_ports_count == 0)
2056 return -ENODEV;
2057
2055 /* TODO: Autoprobe console based on OF */ 2058 /* TODO: Autoprobe console based on OF */
2056 /* pmz_console.index = i; */ 2059 /* pmz_console.index = i; */
2057 register_console(&pmz_console); 2060 register_console(&pmz_console);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 537750261aaa..7d8103cd3e2e 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1433,7 +1433,7 @@ static void work_fn_rx(struct work_struct *work)
1433 desc = s->desc_rx[new]; 1433 desc = s->desc_rx[new];
1434 1434
1435 if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) != 1435 if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
1436 DMA_SUCCESS) { 1436 DMA_COMPLETE) {
1437 /* Handle incomplete DMA receive */ 1437 /* Handle incomplete DMA receive */
1438 struct dma_chan *chan = s->chan_rx; 1438 struct dma_chan *chan = s->chan_rx;
1439 struct shdma_desc *sh_desc = container_of(desc, 1439 struct shdma_desc *sh_desc = container_of(desc,
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 3a1a01af9a80..c74a00ad7add 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2086,6 +2086,7 @@ retry_open:
2086 filp->f_op = &tty_fops; 2086 filp->f_op = &tty_fops;
2087 goto retry_open; 2087 goto retry_open;
2088 } 2088 }
2089 clear_bit(TTY_HUPPED, &tty->flags);
2089 tty_unlock(tty); 2090 tty_unlock(tty);
2090 2091
2091 2092
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 67beb8444930..f7beb6eb40c7 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -653,6 +653,8 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
653 return -EINVAL; 653 return -EINVAL;
654 mem = idev->info->mem + mi; 654 mem = idev->info->mem + mi;
655 655
656 if (mem->addr & ~PAGE_MASK)
657 return -ENODEV;
656 if (vma->vm_end - vma->vm_start > mem->size) 658 if (vma->vm_end - vma->vm_start > mem->size)
657 return -EINVAL; 659 return -EINVAL;
658 660
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 3e7560f004f8..e8404319ca68 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1515,6 +1515,8 @@ static int acm_reset_resume(struct usb_interface *intf)
1515 1515
1516static const struct usb_device_id acm_ids[] = { 1516static const struct usb_device_id acm_ids[] = {
1517 /* quirky and broken devices */ 1517 /* quirky and broken devices */
1518 { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
1519 .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
1518 { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */ 1520 { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
1519 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ 1521 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1520 }, 1522 },
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 06cec635e703..bd9dc3504b51 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4832,8 +4832,9 @@ static void hub_events(void)
4832 hub->ports[i - 1]->child; 4832 hub->ports[i - 1]->child;
4833 4833
4834 dev_dbg(hub_dev, "warm reset port %d\n", i); 4834 dev_dbg(hub_dev, "warm reset port %d\n", i);
4835 if (!udev || !(portstatus & 4835 if (!udev ||
4836 USB_PORT_STAT_CONNECTION)) { 4836 !(portstatus & USB_PORT_STAT_CONNECTION) ||
4837 udev->state == USB_STATE_NOTATTACHED) {
4837 status = hub_port_reset(hub, i, 4838 status = hub_port_reset(hub, i,
4838 NULL, HUB_BH_RESET_TIME, 4839 NULL, HUB_BH_RESET_TIME,
4839 true); 4840 true);
@@ -5501,6 +5502,6 @@ acpi_handle usb_get_hub_port_acpi_handle(struct usb_device *hdev,
5501 if (!hub) 5502 if (!hub)
5502 return NULL; 5503 return NULL;
5503 5504
5504 return DEVICE_ACPI_HANDLE(&hub->ports[port1 - 1]->dev); 5505 return ACPI_HANDLE(&hub->ports[port1 - 1]->dev);
5505} 5506}
5506#endif 5507#endif
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index 255c14464bf2..4e243c37f17f 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -173,7 +173,7 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
173 } 173 }
174 174
175 /* root hub's parent is the usb hcd. */ 175 /* root hub's parent is the usb hcd. */
176 parent_handle = DEVICE_ACPI_HANDLE(dev->parent); 176 parent_handle = ACPI_HANDLE(dev->parent);
177 *handle = acpi_get_child(parent_handle, udev->portnum); 177 *handle = acpi_get_child(parent_handle, udev->portnum);
178 if (!*handle) 178 if (!*handle)
179 return -ENODEV; 179 return -ENODEV;
@@ -194,7 +194,7 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
194 194
195 raw_port_num = usb_hcd_find_raw_port_number(hcd, 195 raw_port_num = usb_hcd_find_raw_port_number(hcd,
196 port_num); 196 port_num);
197 *handle = acpi_get_child(DEVICE_ACPI_HANDLE(&udev->dev), 197 *handle = acpi_get_child(ACPI_HANDLE(&udev->dev),
198 raw_port_num); 198 raw_port_num);
199 if (!*handle) 199 if (!*handle)
200 return -ENODEV; 200 return -ENODEV;
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 95f7649c71a7..21a352079bc2 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -459,6 +459,8 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
459 dep = dwc3_wIndex_to_dep(dwc, wIndex); 459 dep = dwc3_wIndex_to_dep(dwc, wIndex);
460 if (!dep) 460 if (!dep)
461 return -EINVAL; 461 return -EINVAL;
462 if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
463 break;
462 ret = __dwc3_gadget_ep_set_halt(dep, set); 464 ret = __dwc3_gadget_ep_set_halt(dep, set);
463 if (ret) 465 if (ret)
464 return -EINVAL; 466 return -EINVAL;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 5452c0fce360..02e44fcaf205 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1200,9 +1200,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
1200 else 1200 else
1201 dep->flags |= DWC3_EP_STALL; 1201 dep->flags |= DWC3_EP_STALL;
1202 } else { 1202 } else {
1203 if (dep->flags & DWC3_EP_WEDGE)
1204 return 0;
1205
1206 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1203 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1207 DWC3_DEPCMD_CLEARSTALL, &params); 1204 DWC3_DEPCMD_CLEARSTALL, &params);
1208 if (ret) 1205 if (ret)
@@ -1210,7 +1207,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
1210 value ? "set" : "clear", 1207 value ? "set" : "clear",
1211 dep->name); 1208 dep->name);
1212 else 1209 else
1213 dep->flags &= ~DWC3_EP_STALL; 1210 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
1214 } 1211 }
1215 1212
1216 return ret; 1213 return ret;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index a91e6422f930..f66d96ad1f51 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -682,6 +682,7 @@ config USB_CONFIGFS_PHONET
682config USB_CONFIGFS_MASS_STORAGE 682config USB_CONFIGFS_MASS_STORAGE
683 boolean "Mass storage" 683 boolean "Mass storage"
684 depends on USB_CONFIGFS 684 depends on USB_CONFIGFS
685 depends on BLOCK
685 select USB_F_MASS_STORAGE 686 select USB_F_MASS_STORAGE
686 help 687 help
687 The Mass Storage Gadget acts as a USB Mass Storage disk drive. 688 The Mass Storage Gadget acts as a USB Mass Storage disk drive.
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 3e7ae707f691..2018ba1a2172 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -593,6 +593,7 @@ static void reset_config(struct usb_composite_dev *cdev)
593 bitmap_zero(f->endpoints, 32); 593 bitmap_zero(f->endpoints, 32);
594 } 594 }
595 cdev->config = NULL; 595 cdev->config = NULL;
596 cdev->delayed_status = 0;
596} 597}
597 598
598static int set_config(struct usb_composite_dev *cdev, 599static int set_config(struct usb_composite_dev *cdev,
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 774e8b89cdb5..241fc873ffa4 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1304,7 +1304,7 @@ static struct ffs_data *ffs_data_new(void)
1304{ 1304{
1305 struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL); 1305 struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
1306 if (unlikely(!ffs)) 1306 if (unlikely(!ffs))
1307 return 0; 1307 return NULL;
1308 1308
1309 ENTER(); 1309 ENTER();
1310 1310
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index a03ba2c83589..b96393908860 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -523,7 +523,7 @@ static int fsg_setup(struct usb_function *f,
523 */ 523 */
524 DBG(fsg, "bulk reset request\n"); 524 DBG(fsg, "bulk reset request\n");
525 raise_exception(fsg->common, FSG_STATE_RESET); 525 raise_exception(fsg->common, FSG_STATE_RESET);
526 return DELAYED_STATUS; 526 return USB_GADGET_DELAYED_STATUS;
527 527
528 case US_BULK_GET_MAX_LUN: 528 case US_BULK_GET_MAX_LUN:
529 if (ctrl->bRequestType != 529 if (ctrl->bRequestType !=
@@ -602,13 +602,14 @@ static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
602 return true; 602 return true;
603} 603}
604 604
605static int sleep_thread(struct fsg_common *common) 605static int sleep_thread(struct fsg_common *common, bool can_freeze)
606{ 606{
607 int rc = 0; 607 int rc = 0;
608 608
609 /* Wait until a signal arrives or we are woken up */ 609 /* Wait until a signal arrives or we are woken up */
610 for (;;) { 610 for (;;) {
611 try_to_freeze(); 611 if (can_freeze)
612 try_to_freeze();
612 set_current_state(TASK_INTERRUPTIBLE); 613 set_current_state(TASK_INTERRUPTIBLE);
613 if (signal_pending(current)) { 614 if (signal_pending(current)) {
614 rc = -EINTR; 615 rc = -EINTR;
@@ -682,7 +683,7 @@ static int do_read(struct fsg_common *common)
682 /* Wait for the next buffer to become available */ 683 /* Wait for the next buffer to become available */
683 bh = common->next_buffhd_to_fill; 684 bh = common->next_buffhd_to_fill;
684 while (bh->state != BUF_STATE_EMPTY) { 685 while (bh->state != BUF_STATE_EMPTY) {
685 rc = sleep_thread(common); 686 rc = sleep_thread(common, false);
686 if (rc) 687 if (rc)
687 return rc; 688 return rc;
688 } 689 }
@@ -937,7 +938,7 @@ static int do_write(struct fsg_common *common)
937 } 938 }
938 939
939 /* Wait for something to happen */ 940 /* Wait for something to happen */
940 rc = sleep_thread(common); 941 rc = sleep_thread(common, false);
941 if (rc) 942 if (rc)
942 return rc; 943 return rc;
943 } 944 }
@@ -1504,7 +1505,7 @@ static int throw_away_data(struct fsg_common *common)
1504 } 1505 }
1505 1506
1506 /* Otherwise wait for something to happen */ 1507 /* Otherwise wait for something to happen */
1507 rc = sleep_thread(common); 1508 rc = sleep_thread(common, true);
1508 if (rc) 1509 if (rc)
1509 return rc; 1510 return rc;
1510 } 1511 }
@@ -1625,7 +1626,7 @@ static int send_status(struct fsg_common *common)
1625 /* Wait for the next buffer to become available */ 1626 /* Wait for the next buffer to become available */
1626 bh = common->next_buffhd_to_fill; 1627 bh = common->next_buffhd_to_fill;
1627 while (bh->state != BUF_STATE_EMPTY) { 1628 while (bh->state != BUF_STATE_EMPTY) {
1628 rc = sleep_thread(common); 1629 rc = sleep_thread(common, true);
1629 if (rc) 1630 if (rc)
1630 return rc; 1631 return rc;
1631 } 1632 }
@@ -1828,7 +1829,7 @@ static int do_scsi_command(struct fsg_common *common)
1828 bh = common->next_buffhd_to_fill; 1829 bh = common->next_buffhd_to_fill;
1829 common->next_buffhd_to_drain = bh; 1830 common->next_buffhd_to_drain = bh;
1830 while (bh->state != BUF_STATE_EMPTY) { 1831 while (bh->state != BUF_STATE_EMPTY) {
1831 rc = sleep_thread(common); 1832 rc = sleep_thread(common, true);
1832 if (rc) 1833 if (rc)
1833 return rc; 1834 return rc;
1834 } 1835 }
@@ -2174,7 +2175,7 @@ static int get_next_command(struct fsg_common *common)
2174 /* Wait for the next buffer to become available */ 2175 /* Wait for the next buffer to become available */
2175 bh = common->next_buffhd_to_fill; 2176 bh = common->next_buffhd_to_fill;
2176 while (bh->state != BUF_STATE_EMPTY) { 2177 while (bh->state != BUF_STATE_EMPTY) {
2177 rc = sleep_thread(common); 2178 rc = sleep_thread(common, true);
2178 if (rc) 2179 if (rc)
2179 return rc; 2180 return rc;
2180 } 2181 }
@@ -2193,7 +2194,7 @@ static int get_next_command(struct fsg_common *common)
2193 2194
2194 /* Wait for the CBW to arrive */ 2195 /* Wait for the CBW to arrive */
2195 while (bh->state != BUF_STATE_FULL) { 2196 while (bh->state != BUF_STATE_FULL) {
2196 rc = sleep_thread(common); 2197 rc = sleep_thread(common, true);
2197 if (rc) 2198 if (rc)
2198 return rc; 2199 return rc;
2199 } 2200 }
@@ -2379,7 +2380,7 @@ static void handle_exception(struct fsg_common *common)
2379 } 2380 }
2380 if (num_active == 0) 2381 if (num_active == 0)
2381 break; 2382 break;
2382 if (sleep_thread(common)) 2383 if (sleep_thread(common, true))
2383 return; 2384 return;
2384 } 2385 }
2385 2386
@@ -2516,7 +2517,7 @@ static int fsg_main_thread(void *common_)
2516 } 2517 }
2517 2518
2518 if (!common->running) { 2519 if (!common->running) {
2519 sleep_thread(common); 2520 sleep_thread(common, true);
2520 continue; 2521 continue;
2521 } 2522 }
2522 2523
@@ -3111,7 +3112,7 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
3111 fsg->common->can_stall); 3112 fsg->common->can_stall);
3112 if (ret) 3113 if (ret)
3113 return ret; 3114 return ret;
3114 fsg_common_set_inquiry_string(fsg->common, 0, 0); 3115 fsg_common_set_inquiry_string(fsg->common, NULL, NULL);
3115 ret = fsg_common_run_thread(fsg->common); 3116 ret = fsg_common_run_thread(fsg->common);
3116 if (ret) 3117 if (ret)
3117 return ret; 3118 return ret;
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index 0ac6064aa3b8..409a3c45a36a 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -54,6 +54,7 @@
54 */ 54 */
55#ifdef CONFIG_ARCH_PXA 55#ifdef CONFIG_ARCH_PXA
56#include <mach/pxa25x-udc.h> 56#include <mach/pxa25x-udc.h>
57#include <mach/hardware.h>
57#endif 58#endif
58 59
59#ifdef CONFIG_ARCH_LUBBOCK 60#ifdef CONFIG_ARCH_LUBBOCK
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 9875d9c0823f..e20bc109fdd7 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -1180,6 +1180,7 @@ static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,
1180} 1180}
1181 1181
1182static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg); 1182static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg);
1183static void s3c_hsotg_disconnect(struct s3c_hsotg *hsotg);
1183 1184
1184/** 1185/**
1185 * s3c_hsotg_process_control - process a control request 1186 * s3c_hsotg_process_control - process a control request
@@ -1221,6 +1222,7 @@ static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
1221 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { 1222 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1222 switch (ctrl->bRequest) { 1223 switch (ctrl->bRequest) {
1223 case USB_REQ_SET_ADDRESS: 1224 case USB_REQ_SET_ADDRESS:
1225 s3c_hsotg_disconnect(hsotg);
1224 dcfg = readl(hsotg->regs + DCFG); 1226 dcfg = readl(hsotg->regs + DCFG);
1225 dcfg &= ~DCFG_DevAddr_MASK; 1227 dcfg &= ~DCFG_DevAddr_MASK;
1226 dcfg |= ctrl->wValue << DCFG_DevAddr_SHIFT; 1228 dcfg |= ctrl->wValue << DCFG_DevAddr_SHIFT;
@@ -1245,7 +1247,9 @@ static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
1245 /* as a fallback, try delivering it to the driver to deal with */ 1247 /* as a fallback, try delivering it to the driver to deal with */
1246 1248
1247 if (ret == 0 && hsotg->driver) { 1249 if (ret == 0 && hsotg->driver) {
1250 spin_unlock(&hsotg->lock);
1248 ret = hsotg->driver->setup(&hsotg->gadget, ctrl); 1251 ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
1252 spin_lock(&hsotg->lock);
1249 if (ret < 0) 1253 if (ret < 0)
1250 dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret); 1254 dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
1251 } 1255 }
@@ -1308,10 +1312,12 @@ static void s3c_hsotg_complete_setup(struct usb_ep *ep,
1308 return; 1312 return;
1309 } 1313 }
1310 1314
1315 spin_lock(&hsotg->lock);
1311 if (req->actual == 0) 1316 if (req->actual == 0)
1312 s3c_hsotg_enqueue_setup(hsotg); 1317 s3c_hsotg_enqueue_setup(hsotg);
1313 else 1318 else
1314 s3c_hsotg_process_control(hsotg, req->buf); 1319 s3c_hsotg_process_control(hsotg, req->buf);
1320 spin_unlock(&hsotg->lock);
1315} 1321}
1316 1322
1317/** 1323/**
@@ -2533,7 +2539,6 @@ irq_retry:
2533 writel(GINTSTS_USBSusp, hsotg->regs + GINTSTS); 2539 writel(GINTSTS_USBSusp, hsotg->regs + GINTSTS);
2534 2540
2535 call_gadget(hsotg, suspend); 2541 call_gadget(hsotg, suspend);
2536 s3c_hsotg_disconnect(hsotg);
2537 } 2542 }
2538 2543
2539 if (gintsts & GINTSTS_WkUpInt) { 2544 if (gintsts & GINTSTS_WkUpInt) {
diff --git a/drivers/usb/gadget/storage_common.h b/drivers/usb/gadget/storage_common.h
index c74c2fdbd56e..70c891469f57 100644
--- a/drivers/usb/gadget/storage_common.h
+++ b/drivers/usb/gadget/storage_common.h
@@ -119,10 +119,6 @@ static inline bool fsg_lun_is_open(struct fsg_lun *curlun)
119 return curlun->filp != NULL; 119 return curlun->filp != NULL;
120} 120}
121 121
122/* Big enough to hold our biggest descriptor */
123#define EP0_BUFSIZE 256
124#define DELAYED_STATUS (EP0_BUFSIZE + 999) /* An impossibly large value */
125
126/* Default size of buffer length. */ 122/* Default size of buffer length. */
127#define FSG_BUFLEN ((u32)16384) 123#define FSG_BUFLEN ((u32)16384)
128 124
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
index eccea1df702d..0f8aad78b54f 100644
--- a/drivers/usb/gadget/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/tcm_usb_gadget.c
@@ -370,7 +370,7 @@ err:
370 return -ENOMEM; 370 return -ENOMEM;
371} 371}
372 372
373void bot_cleanup_old_alt(struct f_uas *fu) 373static void bot_cleanup_old_alt(struct f_uas *fu)
374{ 374{
375 if (!(fu->flags & USBG_ENABLED)) 375 if (!(fu->flags & USBG_ENABLED))
376 return; 376 return;
@@ -1923,15 +1923,15 @@ static int usbg_register_configfs(void)
1923 } 1923 }
1924 1924
1925 fabric->tf_ops = usbg_ops; 1925 fabric->tf_ops = usbg_ops;
1926 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = usbg_wwn_attrs; 1926 fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = usbg_wwn_attrs;
1927 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = usbg_base_attrs; 1927 fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = usbg_base_attrs;
1928 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; 1928 fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
1929 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; 1929 fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
1930 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; 1930 fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
1931 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; 1931 fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
1932 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 1932 fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
1933 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 1933 fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
1934 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; 1934 fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
1935 ret = target_fabric_configfs_register(fabric); 1935 ret = target_fabric_configfs_register(fabric);
1936 if (ret < 0) { 1936 if (ret < 0) {
1937 printk(KERN_ERR "target_fabric_configfs_register() failed" 1937 printk(KERN_ERR "target_fabric_configfs_register() failed"
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 0dd07ae1555d..f49b0b61ecc8 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -91,17 +91,17 @@ static struct usb_zero_options gzero_options = {
91 * functional coverage for the "USBCV" test harness from USB-IF. 91 * functional coverage for the "USBCV" test harness from USB-IF.
92 * It's always set if OTG mode is enabled. 92 * It's always set if OTG mode is enabled.
93 */ 93 */
94unsigned autoresume = DEFAULT_AUTORESUME; 94static unsigned autoresume = DEFAULT_AUTORESUME;
95module_param(autoresume, uint, S_IRUGO); 95module_param(autoresume, uint, S_IRUGO);
96MODULE_PARM_DESC(autoresume, "zero, or seconds before remote wakeup"); 96MODULE_PARM_DESC(autoresume, "zero, or seconds before remote wakeup");
97 97
98/* Maximum Autoresume time */ 98/* Maximum Autoresume time */
99unsigned max_autoresume; 99static unsigned max_autoresume;
100module_param(max_autoresume, uint, S_IRUGO); 100module_param(max_autoresume, uint, S_IRUGO);
101MODULE_PARM_DESC(max_autoresume, "maximum seconds before remote wakeup"); 101MODULE_PARM_DESC(max_autoresume, "maximum seconds before remote wakeup");
102 102
103/* Interval between two remote wakeups */ 103/* Interval between two remote wakeups */
104unsigned autoresume_interval_ms; 104static unsigned autoresume_interval_ms;
105module_param(autoresume_interval_ms, uint, S_IRUGO); 105module_param(autoresume_interval_ms, uint, S_IRUGO);
106MODULE_PARM_DESC(autoresume_interval_ms, 106MODULE_PARM_DESC(autoresume_interval_ms,
107 "milliseconds to increase successive wakeup delays"); 107 "milliseconds to increase successive wakeup delays");
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index e89ac4d4b87e..9b7435f0dcd6 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/clk.h> 22#include <linux/clk.h>
23#include <linux/device.h> 23#include <linux/device.h>
24#include <linux/dma-mapping.h>
24#include <linux/io.h> 25#include <linux/io.h>
25#include <linux/kernel.h> 26#include <linux/kernel.h>
26#include <linux/module.h> 27#include <linux/module.h>
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 1e2f3f495843..53c2e296467f 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2973,8 +2973,58 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
2973 } 2973 }
2974 2974
2975 while (1) { 2975 while (1) {
2976 if (room_on_ring(xhci, ep_ring, num_trbs)) 2976 if (room_on_ring(xhci, ep_ring, num_trbs)) {
2977 break; 2977 union xhci_trb *trb = ep_ring->enqueue;
2978 unsigned int usable = ep_ring->enq_seg->trbs +
2979 TRBS_PER_SEGMENT - 1 - trb;
2980 u32 nop_cmd;
2981
2982 /*
2983 * Section 4.11.7.1 TD Fragments states that a link
2984 * TRB must only occur at the boundary between
2985 * data bursts (eg 512 bytes for 480M).
2986 * While it is possible to split a large fragment
2987 * we don't know the size yet.
2988 * Simplest solution is to fill the trb before the
2989 * LINK with nop commands.
2990 */
2991 if (num_trbs == 1 || num_trbs <= usable || usable == 0)
2992 break;
2993
2994 if (ep_ring->type != TYPE_BULK)
2995 /*
2996 * While isoc transfers might have a buffer that
2997 * crosses a 64k boundary it is unlikely.
2998 * Since we can't add NOPs without generating
2999 * gaps in the traffic just hope it never
3000 * happens at the end of the ring.
3001 * This could be fixed by writing a LINK TRB
3002 * instead of the first NOP - however the
3003 * TRB_TYPE_LINK_LE32() calls would all need
3004 * changing to check the ring length.
3005 */
3006 break;
3007
3008 if (num_trbs >= TRBS_PER_SEGMENT) {
3009 xhci_err(xhci, "Too many fragments %d, max %d\n",
3010 num_trbs, TRBS_PER_SEGMENT - 1);
3011 return -ENOMEM;
3012 }
3013
3014 nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) |
3015 ep_ring->cycle_state);
3016 ep_ring->num_trbs_free -= usable;
3017 do {
3018 trb->generic.field[0] = 0;
3019 trb->generic.field[1] = 0;
3020 trb->generic.field[2] = 0;
3021 trb->generic.field[3] = nop_cmd;
3022 trb++;
3023 } while (--usable);
3024 ep_ring->enqueue = trb;
3025 if (room_on_ring(xhci, ep_ring, num_trbs))
3026 break;
3027 }
2978 3028
2979 if (ep_ring == xhci->cmd_ring) { 3029 if (ep_ring == xhci->cmd_ring) {
2980 xhci_err(xhci, "Do not support expand command ring\n"); 3030 xhci_err(xhci, "Do not support expand command ring\n");
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 0a43329569d1..4d4499b80449 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1809,7 +1809,6 @@ static void musb_free(struct musb *musb)
1809 disable_irq_wake(musb->nIrq); 1809 disable_irq_wake(musb->nIrq);
1810 free_irq(musb->nIrq, musb); 1810 free_irq(musb->nIrq, musb);
1811 } 1811 }
1812 cancel_work_sync(&musb->irq_work);
1813 1812
1814 musb_host_free(musb); 1813 musb_host_free(musb);
1815} 1814}
@@ -1896,6 +1895,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
1896 musb_platform_disable(musb); 1895 musb_platform_disable(musb);
1897 musb_generic_disable(musb); 1896 musb_generic_disable(musb);
1898 1897
1898 /* Init IRQ workqueue before request_irq */
1899 INIT_WORK(&musb->irq_work, musb_irq_work);
1900
1899 /* setup musb parts of the core (especially endpoints) */ 1901 /* setup musb parts of the core (especially endpoints) */
1900 status = musb_core_init(plat->config->multipoint 1902 status = musb_core_init(plat->config->multipoint
1901 ? MUSB_CONTROLLER_MHDRC 1903 ? MUSB_CONTROLLER_MHDRC
@@ -1905,9 +1907,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
1905 1907
1906 setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb); 1908 setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
1907 1909
1908 /* Init IRQ workqueue before request_irq */
1909 INIT_WORK(&musb->irq_work, musb_irq_work);
1910
1911 /* attach to the IRQ */ 1910 /* attach to the IRQ */
1912 if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) { 1911 if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) {
1913 dev_err(dev, "request_irq %d failed!\n", nIrq); 1912 dev_err(dev, "request_irq %d failed!\n", nIrq);
@@ -1981,6 +1980,7 @@ fail4:
1981 musb_host_cleanup(musb); 1980 musb_host_cleanup(musb);
1982 1981
1983fail3: 1982fail3:
1983 cancel_work_sync(&musb->irq_work);
1984 if (musb->dma_controller) 1984 if (musb->dma_controller)
1985 dma_controller_destroy(musb->dma_controller); 1985 dma_controller_destroy(musb->dma_controller);
1986fail2_5: 1986fail2_5:
@@ -2043,6 +2043,7 @@ static int musb_remove(struct platform_device *pdev)
2043 if (musb->dma_controller) 2043 if (musb->dma_controller)
2044 dma_controller_destroy(musb->dma_controller); 2044 dma_controller_destroy(musb->dma_controller);
2045 2045
2046 cancel_work_sync(&musb->irq_work);
2046 musb_free(musb); 2047 musb_free(musb);
2047 device_init_wakeup(dev, 0); 2048 device_init_wakeup(dev, 0);
2048 return 0; 2049 return 0;
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index ff9d6de2b746..a12bd30401e0 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -38,6 +38,7 @@ struct cppi41_dma_channel {
38 u32 prog_len; 38 u32 prog_len;
39 u32 transferred; 39 u32 transferred;
40 u32 packet_sz; 40 u32 packet_sz;
41 struct list_head tx_check;
41}; 42};
42 43
43#define MUSB_DMA_NUM_CHANNELS 15 44#define MUSB_DMA_NUM_CHANNELS 15
@@ -47,6 +48,8 @@ struct cppi41_dma_controller {
47 struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS]; 48 struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
48 struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS]; 49 struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
49 struct musb *musb; 50 struct musb *musb;
51 struct hrtimer early_tx;
52 struct list_head early_tx_list;
50 u32 rx_mode; 53 u32 rx_mode;
51 u32 tx_mode; 54 u32 tx_mode;
52 u32 auto_req; 55 u32 auto_req;
@@ -96,31 +99,27 @@ static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
96 cppi41_channel->usb_toggle = toggle; 99 cppi41_channel->usb_toggle = toggle;
97} 100}
98 101
99static void cppi41_dma_callback(void *private_data) 102static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
100{ 103{
101 struct dma_channel *channel = private_data; 104 u8 epnum = hw_ep->epnum;
102 struct cppi41_dma_channel *cppi41_channel = channel->private_data; 105 struct musb *musb = hw_ep->musb;
103 struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep; 106 void __iomem *epio = musb->endpoints[epnum].regs;
104 struct musb *musb = hw_ep->musb; 107 u16 csr;
105 unsigned long flags;
106 struct dma_tx_state txstate;
107 u32 transferred;
108 108
109 spin_lock_irqsave(&musb->lock, flags); 109 csr = musb_readw(epio, MUSB_TXCSR);
110 if (csr & MUSB_TXCSR_TXPKTRDY)
111 return false;
112 return true;
113}
110 114
111 dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie, 115static void cppi41_dma_callback(void *private_data);
112 &txstate);
113 transferred = cppi41_channel->prog_len - txstate.residue;
114 cppi41_channel->transferred += transferred;
115 116
116 dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n", 117static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
117 hw_ep->epnum, cppi41_channel->transferred, 118{
118 cppi41_channel->total_len); 119 struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
120 struct musb *musb = hw_ep->musb;
119 121
120 update_rx_toggle(cppi41_channel); 122 if (!cppi41_channel->prog_len) {
121
122 if (cppi41_channel->transferred == cppi41_channel->total_len ||
123 transferred < cppi41_channel->packet_sz) {
124 123
125 /* done, complete */ 124 /* done, complete */
126 cppi41_channel->channel.actual_len = 125 cppi41_channel->channel.actual_len =
@@ -150,13 +149,11 @@ static void cppi41_dma_callback(void *private_data)
150 remain_bytes, 149 remain_bytes,
151 direction, 150 direction,
152 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 151 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
153 if (WARN_ON(!dma_desc)) { 152 if (WARN_ON(!dma_desc))
154 spin_unlock_irqrestore(&musb->lock, flags);
155 return; 153 return;
156 }
157 154
158 dma_desc->callback = cppi41_dma_callback; 155 dma_desc->callback = cppi41_dma_callback;
159 dma_desc->callback_param = channel; 156 dma_desc->callback_param = &cppi41_channel->channel;
160 cppi41_channel->cookie = dma_desc->tx_submit(dma_desc); 157 cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
161 dma_async_issue_pending(dc); 158 dma_async_issue_pending(dc);
162 159
@@ -166,6 +163,117 @@ static void cppi41_dma_callback(void *private_data)
166 musb_writew(epio, MUSB_RXCSR, csr); 163 musb_writew(epio, MUSB_RXCSR, csr);
167 } 164 }
168 } 165 }
166}
167
168static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
169{
170 struct cppi41_dma_controller *controller;
171 struct cppi41_dma_channel *cppi41_channel, *n;
172 struct musb *musb;
173 unsigned long flags;
174 enum hrtimer_restart ret = HRTIMER_NORESTART;
175
176 controller = container_of(timer, struct cppi41_dma_controller,
177 early_tx);
178 musb = controller->musb;
179
180 spin_lock_irqsave(&musb->lock, flags);
181 list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
182 tx_check) {
183 bool empty;
184 struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
185
186 empty = musb_is_tx_fifo_empty(hw_ep);
187 if (empty) {
188 list_del_init(&cppi41_channel->tx_check);
189 cppi41_trans_done(cppi41_channel);
190 }
191 }
192
193 if (!list_empty(&controller->early_tx_list)) {
194 ret = HRTIMER_RESTART;
195 hrtimer_forward_now(&controller->early_tx,
196 ktime_set(0, 150 * NSEC_PER_USEC));
197 }
198
199 spin_unlock_irqrestore(&musb->lock, flags);
200 return ret;
201}
202
203static void cppi41_dma_callback(void *private_data)
204{
205 struct dma_channel *channel = private_data;
206 struct cppi41_dma_channel *cppi41_channel = channel->private_data;
207 struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
208 struct musb *musb = hw_ep->musb;
209 unsigned long flags;
210 struct dma_tx_state txstate;
211 u32 transferred;
212 bool empty;
213
214 spin_lock_irqsave(&musb->lock, flags);
215
216 dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
217 &txstate);
218 transferred = cppi41_channel->prog_len - txstate.residue;
219 cppi41_channel->transferred += transferred;
220
221 dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
222 hw_ep->epnum, cppi41_channel->transferred,
223 cppi41_channel->total_len);
224
225 update_rx_toggle(cppi41_channel);
226
227 if (cppi41_channel->transferred == cppi41_channel->total_len ||
228 transferred < cppi41_channel->packet_sz)
229 cppi41_channel->prog_len = 0;
230
231 empty = musb_is_tx_fifo_empty(hw_ep);
232 if (empty) {
233 cppi41_trans_done(cppi41_channel);
234 } else {
235 struct cppi41_dma_controller *controller;
236 /*
237 * On AM335x it has been observed that the TX interrupt fires
238 * too early that means the TXFIFO is not yet empty but the DMA
239 * engine says that it is done with the transfer. We don't
240 * receive a FIFO empty interrupt so the only thing we can do is
241 * to poll for the bit. On HS it usually takes 2us, on FS around
242 * 110us - 150us depending on the transfer size.
243 * We spin on HS (no longer than than 25us and setup a timer on
244 * FS to check for the bit and complete the transfer.
245 */
246 controller = cppi41_channel->controller;
247
248 if (musb->g.speed == USB_SPEED_HIGH) {
249 unsigned wait = 25;
250
251 do {
252 empty = musb_is_tx_fifo_empty(hw_ep);
253 if (empty)
254 break;
255 wait--;
256 if (!wait)
257 break;
258 udelay(1);
259 } while (1);
260
261 empty = musb_is_tx_fifo_empty(hw_ep);
262 if (empty) {
263 cppi41_trans_done(cppi41_channel);
264 goto out;
265 }
266 }
267 list_add_tail(&cppi41_channel->tx_check,
268 &controller->early_tx_list);
269 if (!hrtimer_active(&controller->early_tx)) {
270 hrtimer_start_range_ns(&controller->early_tx,
271 ktime_set(0, 140 * NSEC_PER_USEC),
272 40 * NSEC_PER_USEC,
273 HRTIMER_MODE_REL);
274 }
275 }
276out:
169 spin_unlock_irqrestore(&musb->lock, flags); 277 spin_unlock_irqrestore(&musb->lock, flags);
170} 278}
171 279
@@ -364,6 +472,8 @@ static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
364 WARN_ON(1); 472 WARN_ON(1);
365 return 1; 473 return 1;
366 } 474 }
475 if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
476 return 0;
367 if (cppi41_channel->is_tx) 477 if (cppi41_channel->is_tx)
368 return 1; 478 return 1;
369 /* AM335x Advisory 1.0.13. No workaround for device RX mode */ 479 /* AM335x Advisory 1.0.13. No workaround for device RX mode */
@@ -388,6 +498,7 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
388 if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE) 498 if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
389 return 0; 499 return 0;
390 500
501 list_del_init(&cppi41_channel->tx_check);
391 if (is_tx) { 502 if (is_tx) {
392 csr = musb_readw(epio, MUSB_TXCSR); 503 csr = musb_readw(epio, MUSB_TXCSR);
393 csr &= ~MUSB_TXCSR_DMAENAB; 504 csr &= ~MUSB_TXCSR_DMAENAB;
@@ -495,6 +606,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
495 cppi41_channel->controller = controller; 606 cppi41_channel->controller = controller;
496 cppi41_channel->port_num = port; 607 cppi41_channel->port_num = port;
497 cppi41_channel->is_tx = is_tx; 608 cppi41_channel->is_tx = is_tx;
609 INIT_LIST_HEAD(&cppi41_channel->tx_check);
498 610
499 musb_dma = &cppi41_channel->channel; 611 musb_dma = &cppi41_channel->channel;
500 musb_dma->private_data = cppi41_channel; 612 musb_dma->private_data = cppi41_channel;
@@ -520,6 +632,7 @@ void dma_controller_destroy(struct dma_controller *c)
520 struct cppi41_dma_controller *controller = container_of(c, 632 struct cppi41_dma_controller *controller = container_of(c,
521 struct cppi41_dma_controller, controller); 633 struct cppi41_dma_controller, controller);
522 634
635 hrtimer_cancel(&controller->early_tx);
523 cppi41_dma_controller_stop(controller); 636 cppi41_dma_controller_stop(controller);
524 kfree(controller); 637 kfree(controller);
525} 638}
@@ -539,6 +652,9 @@ struct dma_controller *dma_controller_create(struct musb *musb,
539 if (!controller) 652 if (!controller)
540 goto kzalloc_fail; 653 goto kzalloc_fail;
541 654
655 hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
656 controller->early_tx.function = cppi41_recheck_tx_req;
657 INIT_LIST_HEAD(&controller->early_tx_list);
542 controller->musb = musb; 658 controller->musb = musb;
543 659
544 controller->controller.channel_alloc = cppi41_dma_channel_allocate; 660 controller->controller.channel_alloc = cppi41_dma_channel_allocate;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index d2d3a173b315..32fb057c03f5 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1796,7 +1796,11 @@ int musb_gadget_setup(struct musb *musb)
1796 1796
1797 /* this "gadget" abstracts/virtualizes the controller */ 1797 /* this "gadget" abstracts/virtualizes the controller */
1798 musb->g.name = musb_driver_name; 1798 musb->g.name = musb_driver_name;
1799#if IS_ENABLED(CONFIG_USB_MUSB_DUAL_ROLE)
1799 musb->g.is_otg = 1; 1800 musb->g.is_otg = 1;
1801#elif IS_ENABLED(CONFIG_USB_MUSB_GADGET)
1802 musb->g.is_otg = 0;
1803#endif
1800 1804
1801 musb_g_init_endpoints(musb); 1805 musb_g_init_endpoints(musb);
1802 1806
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index 6370e50649d7..0e3c60cb669a 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -52,8 +52,7 @@ static int am335x_phy_probe(struct platform_device *pdev)
52 return am_phy->id; 52 return am_phy->id;
53 } 53 }
54 54
55 ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen, 55 ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen, NULL);
56 USB_PHY_TYPE_USB2, 0, false);
57 if (ret) 56 if (ret)
58 return ret; 57 return ret;
59 58
@@ -66,8 +65,6 @@ static int am335x_phy_probe(struct platform_device *pdev)
66 platform_set_drvdata(pdev, am_phy); 65 platform_set_drvdata(pdev, am_phy);
67 66
68 return 0; 67 return 0;
69
70 return ret;
71} 68}
72 69
73static int am335x_phy_remove(struct platform_device *pdev) 70static int am335x_phy_remove(struct platform_device *pdev)
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index fce3a9e9bb5d..aa6d37b3378a 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -48,8 +48,9 @@ void usb_nop_xceiv_register(void)
48 if (pd) 48 if (pd)
49 return; 49 return;
50 pd = platform_device_register_simple("usb_phy_gen_xceiv", -1, NULL, 0); 50 pd = platform_device_register_simple("usb_phy_gen_xceiv", -1, NULL, 0);
51 if (!pd) { 51 if (IS_ERR(pd)) {
52 pr_err("Unable to register generic usb transceiver\n"); 52 pr_err("Unable to register generic usb transceiver\n");
53 pd = NULL;
53 return; 54 return;
54 } 55 }
55} 56}
@@ -150,10 +151,40 @@ static int nop_set_host(struct usb_otg *otg, struct usb_bus *host)
150} 151}
151 152
152int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop, 153int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop,
153 enum usb_phy_type type, u32 clk_rate, bool needs_vcc) 154 struct usb_phy_gen_xceiv_platform_data *pdata)
154{ 155{
156 enum usb_phy_type type = USB_PHY_TYPE_USB2;
155 int err; 157 int err;
156 158
159 u32 clk_rate = 0;
160 bool needs_vcc = false;
161
162 nop->reset_active_low = true; /* default behaviour */
163
164 if (dev->of_node) {
165 struct device_node *node = dev->of_node;
166 enum of_gpio_flags flags = 0;
167
168 if (of_property_read_u32(node, "clock-frequency", &clk_rate))
169 clk_rate = 0;
170
171 needs_vcc = of_property_read_bool(node, "vcc-supply");
172 nop->gpio_reset = of_get_named_gpio_flags(node, "reset-gpios",
173 0, &flags);
174 if (nop->gpio_reset == -EPROBE_DEFER)
175 return -EPROBE_DEFER;
176
177 nop->reset_active_low = flags & OF_GPIO_ACTIVE_LOW;
178
179 } else if (pdata) {
180 type = pdata->type;
181 clk_rate = pdata->clk_rate;
182 needs_vcc = pdata->needs_vcc;
183 nop->gpio_reset = pdata->gpio_reset;
184 } else {
185 nop->gpio_reset = -1;
186 }
187
157 nop->phy.otg = devm_kzalloc(dev, sizeof(*nop->phy.otg), 188 nop->phy.otg = devm_kzalloc(dev, sizeof(*nop->phy.otg),
158 GFP_KERNEL); 189 GFP_KERNEL);
159 if (!nop->phy.otg) 190 if (!nop->phy.otg)
@@ -218,43 +249,14 @@ EXPORT_SYMBOL_GPL(usb_phy_gen_create_phy);
218static int usb_phy_gen_xceiv_probe(struct platform_device *pdev) 249static int usb_phy_gen_xceiv_probe(struct platform_device *pdev)
219{ 250{
220 struct device *dev = &pdev->dev; 251 struct device *dev = &pdev->dev;
221 struct usb_phy_gen_xceiv_platform_data *pdata =
222 dev_get_platdata(&pdev->dev);
223 struct usb_phy_gen_xceiv *nop; 252 struct usb_phy_gen_xceiv *nop;
224 enum usb_phy_type type = USB_PHY_TYPE_USB2;
225 int err; 253 int err;
226 u32 clk_rate = 0;
227 bool needs_vcc = false;
228 254
229 nop = devm_kzalloc(dev, sizeof(*nop), GFP_KERNEL); 255 nop = devm_kzalloc(dev, sizeof(*nop), GFP_KERNEL);
230 if (!nop) 256 if (!nop)
231 return -ENOMEM; 257 return -ENOMEM;
232 258
233 nop->reset_active_low = true; /* default behaviour */ 259 err = usb_phy_gen_create_phy(dev, nop, dev_get_platdata(&pdev->dev));
234
235 if (dev->of_node) {
236 struct device_node *node = dev->of_node;
237 enum of_gpio_flags flags;
238
239 if (of_property_read_u32(node, "clock-frequency", &clk_rate))
240 clk_rate = 0;
241
242 needs_vcc = of_property_read_bool(node, "vcc-supply");
243 nop->gpio_reset = of_get_named_gpio_flags(node, "reset-gpios",
244 0, &flags);
245 if (nop->gpio_reset == -EPROBE_DEFER)
246 return -EPROBE_DEFER;
247
248 nop->reset_active_low = flags & OF_GPIO_ACTIVE_LOW;
249
250 } else if (pdata) {
251 type = pdata->type;
252 clk_rate = pdata->clk_rate;
253 needs_vcc = pdata->needs_vcc;
254 nop->gpio_reset = pdata->gpio_reset;
255 }
256
257 err = usb_phy_gen_create_phy(dev, nop, type, clk_rate, needs_vcc);
258 if (err) 260 if (err)
259 return err; 261 return err;
260 262
@@ -271,8 +273,6 @@ static int usb_phy_gen_xceiv_probe(struct platform_device *pdev)
271 platform_set_drvdata(pdev, nop); 273 platform_set_drvdata(pdev, nop);
272 274
273 return 0; 275 return 0;
274
275 return err;
276} 276}
277 277
278static int usb_phy_gen_xceiv_remove(struct platform_device *pdev) 278static int usb_phy_gen_xceiv_remove(struct platform_device *pdev)
diff --git a/drivers/usb/phy/phy-generic.h b/drivers/usb/phy/phy-generic.h
index d2a220d81734..38a81f307b82 100644
--- a/drivers/usb/phy/phy-generic.h
+++ b/drivers/usb/phy/phy-generic.h
@@ -1,6 +1,8 @@
1#ifndef _PHY_GENERIC_H_ 1#ifndef _PHY_GENERIC_H_
2#define _PHY_GENERIC_H_ 2#define _PHY_GENERIC_H_
3 3
4#include <linux/usb/usb_phy_gen_xceiv.h>
5
4struct usb_phy_gen_xceiv { 6struct usb_phy_gen_xceiv {
5 struct usb_phy phy; 7 struct usb_phy phy;
6 struct device *dev; 8 struct device *dev;
@@ -14,6 +16,6 @@ int usb_gen_phy_init(struct usb_phy *phy);
14void usb_gen_phy_shutdown(struct usb_phy *phy); 16void usb_gen_phy_shutdown(struct usb_phy *phy);
15 17
16int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop, 18int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop,
17 enum usb_phy_type type, u32 clk_rate, bool needs_vcc); 19 struct usb_phy_gen_xceiv_platform_data *pdata);
18 20
19#endif 21#endif
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index fdd33b44dbd3..545844b7e796 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -164,7 +164,7 @@ static int mxs_phy_probe(struct platform_device *pdev)
164 164
165 mxs_phy->clk = clk; 165 mxs_phy->clk = clk;
166 166
167 platform_set_drvdata(pdev, &mxs_phy->phy); 167 platform_set_drvdata(pdev, mxs_phy);
168 168
169 ret = usb_add_phy_dev(&mxs_phy->phy); 169 ret = usb_add_phy_dev(&mxs_phy->phy);
170 if (ret) 170 if (ret)
diff --git a/drivers/usb/phy/phy-rcar-gen2-usb.c b/drivers/usb/phy/phy-rcar-gen2-usb.c
index a99a6953f11c..db3ab34cddb4 100644
--- a/drivers/usb/phy/phy-rcar-gen2-usb.c
+++ b/drivers/usb/phy/phy-rcar-gen2-usb.c
@@ -107,10 +107,10 @@ static void __rcar_gen2_usb_phy_init(struct rcar_gen2_usb_phy_priv *priv)
107 clk_prepare_enable(priv->clk); 107 clk_prepare_enable(priv->clk);
108 108
109 /* Set USB channels in the USBHS UGCTRL2 register */ 109 /* Set USB channels in the USBHS UGCTRL2 register */
110 val = ioread32(priv->base); 110 val = ioread32(priv->base + USBHS_UGCTRL2_REG);
111 val &= ~(USBHS_UGCTRL2_USB0_HS | USBHS_UGCTRL2_USB2_SS); 111 val &= ~(USBHS_UGCTRL2_USB0_HS | USBHS_UGCTRL2_USB2_SS);
112 val |= priv->ugctrl2; 112 val |= priv->ugctrl2;
113 iowrite32(val, priv->base); 113 iowrite32(val, priv->base + USBHS_UGCTRL2_REG);
114} 114}
115 115
116/* Shutdown USB channels */ 116/* Shutdown USB channels */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 9ced8937a8f3..fb0d537435eb 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -2123,6 +2123,20 @@ static void ftdi_set_termios(struct tty_struct *tty,
2123 termios->c_cflag |= CRTSCTS; 2123 termios->c_cflag |= CRTSCTS;
2124 } 2124 }
2125 2125
2126 /*
2127 * All FTDI UART chips are limited to CS7/8. We won't pretend to
2128 * support CS5/6 and revert the CSIZE setting instead.
2129 */
2130 if ((C_CSIZE(tty) != CS8) && (C_CSIZE(tty) != CS7)) {
2131 dev_warn(ddev, "requested CSIZE setting not supported\n");
2132
2133 termios->c_cflag &= ~CSIZE;
2134 if (old_termios)
2135 termios->c_cflag |= old_termios->c_cflag & CSIZE;
2136 else
2137 termios->c_cflag |= CS8;
2138 }
2139
2126 cflag = termios->c_cflag; 2140 cflag = termios->c_cflag;
2127 2141
2128 if (!old_termios) 2142 if (!old_termios)
@@ -2159,19 +2173,16 @@ no_skip:
2159 } else { 2173 } else {
2160 urb_value |= FTDI_SIO_SET_DATA_PARITY_NONE; 2174 urb_value |= FTDI_SIO_SET_DATA_PARITY_NONE;
2161 } 2175 }
2162 if (cflag & CSIZE) { 2176 switch (cflag & CSIZE) {
2163 switch (cflag & CSIZE) { 2177 case CS7:
2164 case CS7: 2178 urb_value |= 7;
2165 urb_value |= 7; 2179 dev_dbg(ddev, "Setting CS7\n");
2166 dev_dbg(ddev, "Setting CS7\n"); 2180 break;
2167 break; 2181 default:
2168 case CS8: 2182 case CS8:
2169 urb_value |= 8; 2183 urb_value |= 8;
2170 dev_dbg(ddev, "Setting CS8\n"); 2184 dev_dbg(ddev, "Setting CS8\n");
2171 break; 2185 break;
2172 default:
2173 dev_err(ddev, "CSIZE was set but not CS7-CS8\n");
2174 }
2175 } 2186 }
2176 2187
2177 /* This is needed by the break command since it uses the same command 2188 /* This is needed by the break command since it uses the same command
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 2b01ec8651c2..b63ce023f96f 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -173,16 +173,8 @@ retry:
173 clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags); 173 clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
174 return result; 174 return result;
175 } 175 }
176 /*
177 * Try sending off another urb, unless called from completion handler
178 * (in which case there will be no free urb or no data).
179 */
180 if (mem_flags != GFP_ATOMIC)
181 goto retry;
182 176
183 clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags); 177 goto retry; /* try sending off another urb */
184
185 return 0;
186} 178}
187EXPORT_SYMBOL_GPL(usb_serial_generic_write_start); 179EXPORT_SYMBOL_GPL(usb_serial_generic_write_start);
188 180
@@ -208,7 +200,7 @@ int usb_serial_generic_write(struct tty_struct *tty,
208 return 0; 200 return 0;
209 201
210 count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock); 202 count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock);
211 result = usb_serial_generic_write_start(port, GFP_KERNEL); 203 result = usb_serial_generic_write_start(port, GFP_ATOMIC);
212 if (result) 204 if (result)
213 return result; 205 return result;
214 206
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index e5bdd987b9e8..a69da83604c0 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1813,25 +1813,25 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
1813 iflag = tty->termios.c_iflag; 1813 iflag = tty->termios.c_iflag;
1814 1814
1815 /* Change the number of bits */ 1815 /* Change the number of bits */
1816 if (cflag & CSIZE) { 1816 switch (cflag & CSIZE) {
1817 switch (cflag & CSIZE) { 1817 case CS5:
1818 case CS5: 1818 lData = LCR_BITS_5;
1819 lData = LCR_BITS_5; 1819 break;
1820 break;
1821 1820
1822 case CS6: 1821 case CS6:
1823 lData = LCR_BITS_6; 1822 lData = LCR_BITS_6;
1824 break; 1823 break;
1825 1824
1826 case CS7: 1825 case CS7:
1827 lData = LCR_BITS_7; 1826 lData = LCR_BITS_7;
1828 break; 1827 break;
1829 default: 1828
1830 case CS8: 1829 default:
1831 lData = LCR_BITS_8; 1830 case CS8:
1832 break; 1831 lData = LCR_BITS_8;
1833 } 1832 break;
1834 } 1833 }
1834
1835 /* Change the Parity bit */ 1835 /* Change the Parity bit */
1836 if (cflag & PARENB) { 1836 if (cflag & PARENB) {
1837 if (cflag & PARODD) { 1837 if (cflag & PARODD) {
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index c3d94853b4ab..496b7e39d5be 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -85,6 +85,7 @@ static void option_instat_callback(struct urb *urb);
85#define HUAWEI_PRODUCT_K4505 0x1464 85#define HUAWEI_PRODUCT_K4505 0x1464
86#define HUAWEI_PRODUCT_K3765 0x1465 86#define HUAWEI_PRODUCT_K3765 0x1465
87#define HUAWEI_PRODUCT_K4605 0x14C6 87#define HUAWEI_PRODUCT_K4605 0x14C6
88#define HUAWEI_PRODUCT_E173S6 0x1C07
88 89
89#define QUANTA_VENDOR_ID 0x0408 90#define QUANTA_VENDOR_ID 0x0408
90#define QUANTA_PRODUCT_Q101 0xEA02 91#define QUANTA_PRODUCT_Q101 0xEA02
@@ -572,6 +573,8 @@ static const struct usb_device_id option_ids[] = {
572 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, 573 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
573 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), 574 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
574 .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, 575 .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
576 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff),
577 .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
575 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff), 578 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
576 .driver_info = (kernel_ulong_t) &net_intf2_blacklist }, 579 .driver_info = (kernel_ulong_t) &net_intf2_blacklist },
577 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, 580 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
@@ -634,6 +637,10 @@ static const struct usb_device_id option_ids[] = {
634 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) }, 637 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) },
635 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) }, 638 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) },
636 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) }, 639 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) },
640 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x72) },
641 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x73) },
642 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x74) },
643 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x75) },
637 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) }, 644 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) },
638 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) }, 645 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) },
639 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) }, 646 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) },
@@ -688,6 +695,10 @@ static const struct usb_device_id option_ids[] = {
688 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) }, 695 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) },
689 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) }, 696 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) },
690 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) }, 697 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) },
698 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x72) },
699 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x73) },
700 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x74) },
701 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x75) },
691 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) }, 702 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) },
692 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) }, 703 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) },
693 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) }, 704 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
@@ -742,6 +753,10 @@ static const struct usb_device_id option_ids[] = {
742 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) }, 753 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
743 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) }, 754 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
744 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) }, 755 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
756 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x72) },
757 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x73) },
758 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x74) },
759 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x75) },
745 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) }, 760 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
746 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) }, 761 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
747 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) }, 762 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
@@ -796,6 +811,10 @@ static const struct usb_device_id option_ids[] = {
796 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) }, 811 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
797 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) }, 812 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
798 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) }, 813 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
814 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x72) },
815 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x73) },
816 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x74) },
817 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x75) },
799 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) }, 818 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
800 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) }, 819 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
801 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) }, 820 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
@@ -850,6 +869,10 @@ static const struct usb_device_id option_ids[] = {
850 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) }, 869 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
851 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) }, 870 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
852 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) }, 871 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
872 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x72) },
873 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x73) },
874 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x74) },
875 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x75) },
853 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) }, 876 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
854 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) }, 877 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
855 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) }, 878 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
@@ -904,6 +927,10 @@ static const struct usb_device_id option_ids[] = {
904 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) }, 927 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
905 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) }, 928 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
906 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) }, 929 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
930 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x72) },
931 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x73) },
932 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x74) },
933 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x75) },
907 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) }, 934 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
908 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) }, 935 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
909 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) }, 936 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 1e6de4cd079d..1e3318dfa1cb 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -361,23 +361,21 @@ static void pl2303_set_termios(struct tty_struct *tty,
361 0, 0, buf, 7, 100); 361 0, 0, buf, 7, 100);
362 dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf); 362 dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf);
363 363
364 if (C_CSIZE(tty)) { 364 switch (C_CSIZE(tty)) {
365 switch (C_CSIZE(tty)) { 365 case CS5:
366 case CS5: 366 buf[6] = 5;
367 buf[6] = 5; 367 break;
368 break; 368 case CS6:
369 case CS6: 369 buf[6] = 6;
370 buf[6] = 6; 370 break;
371 break; 371 case CS7:
372 case CS7: 372 buf[6] = 7;
373 buf[6] = 7; 373 break;
374 break; 374 default:
375 default: 375 case CS8:
376 case CS8: 376 buf[6] = 8;
377 buf[6] = 8;
378 }
379 dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
380 } 377 }
378 dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
381 379
382 /* For reference buf[0]:buf[3] baud rate value */ 380 /* For reference buf[0]:buf[3] baud rate value */
383 pl2303_encode_baudrate(tty, port, &buf[0]); 381 pl2303_encode_baudrate(tty, port, &buf[0]);
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 4abac28b5992..5b793c352267 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -348,22 +348,20 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
348 } 348 }
349 349
350 /* Set Data Length : 00:5bit, 01:6bit, 10:7bit, 11:8bit */ 350 /* Set Data Length : 00:5bit, 01:6bit, 10:7bit, 11:8bit */
351 if (cflag & CSIZE) { 351 switch (cflag & CSIZE) {
352 switch (cflag & CSIZE) { 352 case CS5:
353 case CS5: 353 buf[1] |= SET_UART_FORMAT_SIZE_5;
354 buf[1] |= SET_UART_FORMAT_SIZE_5; 354 break;
355 break; 355 case CS6:
356 case CS6: 356 buf[1] |= SET_UART_FORMAT_SIZE_6;
357 buf[1] |= SET_UART_FORMAT_SIZE_6; 357 break;
358 break; 358 case CS7:
359 case CS7: 359 buf[1] |= SET_UART_FORMAT_SIZE_7;
360 buf[1] |= SET_UART_FORMAT_SIZE_7; 360 break;
361 break; 361 default:
362 default: 362 case CS8:
363 case CS8: 363 buf[1] |= SET_UART_FORMAT_SIZE_8;
364 buf[1] |= SET_UART_FORMAT_SIZE_8; 364 break;
365 break;
366 }
367 } 365 }
368 366
369 /* Set Stop bit2 : 0:1bit 1:2bit */ 367 /* Set Stop bit2 : 0:1bit 1:2bit */
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index e538b72c4e3a..f14e7929ba22 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -97,18 +97,12 @@ static void wusbhc_devconnect_acked_work(struct work_struct *work);
97 97
98static void wusb_dev_free(struct wusb_dev *wusb_dev) 98static void wusb_dev_free(struct wusb_dev *wusb_dev)
99{ 99{
100 if (wusb_dev) { 100 kfree(wusb_dev);
101 kfree(wusb_dev->set_gtk_req);
102 usb_free_urb(wusb_dev->set_gtk_urb);
103 kfree(wusb_dev);
104 }
105} 101}
106 102
107static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc) 103static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc)
108{ 104{
109 struct wusb_dev *wusb_dev; 105 struct wusb_dev *wusb_dev;
110 struct urb *urb;
111 struct usb_ctrlrequest *req;
112 106
113 wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL); 107 wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL);
114 if (wusb_dev == NULL) 108 if (wusb_dev == NULL)
@@ -118,22 +112,6 @@ static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc)
118 112
119 INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work); 113 INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work);
120 114
121 urb = usb_alloc_urb(0, GFP_KERNEL);
122 if (urb == NULL)
123 goto err;
124 wusb_dev->set_gtk_urb = urb;
125
126 req = kmalloc(sizeof(*req), GFP_KERNEL);
127 if (req == NULL)
128 goto err;
129 wusb_dev->set_gtk_req = req;
130
131 req->bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
132 req->bRequest = USB_REQ_SET_DESCRIPTOR;
133 req->wValue = cpu_to_le16(USB_DT_KEY << 8 | wusbhc->gtk_index);
134 req->wIndex = 0;
135 req->wLength = cpu_to_le16(wusbhc->gtk.descr.bLength);
136
137 return wusb_dev; 115 return wusb_dev;
138err: 116err:
139 wusb_dev_free(wusb_dev); 117 wusb_dev_free(wusb_dev);
@@ -411,9 +389,6 @@ static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc,
411/* 389/*
412 * Refresh the list of keep alives to emit in the MMC 390 * Refresh the list of keep alives to emit in the MMC
413 * 391 *
414 * Some devices don't respond to keep alives unless they've been
415 * authenticated, so skip unauthenticated devices.
416 *
417 * We only publish the first four devices that have a coming timeout 392 * We only publish the first four devices that have a coming timeout
418 * condition. Then when we are done processing those, we go for the 393 * condition. Then when we are done processing those, we go for the
419 * next ones. We ignore the ones that have timed out already (they'll 394 * next ones. We ignore the ones that have timed out already (they'll
@@ -448,7 +423,7 @@ static void __wusbhc_keep_alive(struct wusbhc *wusbhc)
448 423
449 if (wusb_dev == NULL) 424 if (wusb_dev == NULL)
450 continue; 425 continue;
451 if (wusb_dev->usb_dev == NULL || !wusb_dev->usb_dev->authenticated) 426 if (wusb_dev->usb_dev == NULL)
452 continue; 427 continue;
453 428
454 if (time_after(jiffies, wusb_dev->entry_ts + tt)) { 429 if (time_after(jiffies, wusb_dev->entry_ts + tt)) {
@@ -524,11 +499,19 @@ static struct wusb_dev *wusbhc_find_dev_by_addr(struct wusbhc *wusbhc, u8 addr)
524 * 499 *
525 * @wusbhc shall be referenced and unlocked 500 * @wusbhc shall be referenced and unlocked
526 */ 501 */
527static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) 502static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, u8 srcaddr)
528{ 503{
504 struct wusb_dev *wusb_dev;
505
529 mutex_lock(&wusbhc->mutex); 506 mutex_lock(&wusbhc->mutex);
530 wusb_dev->entry_ts = jiffies; 507 wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
531 __wusbhc_keep_alive(wusbhc); 508 if (wusb_dev == NULL) {
509 dev_dbg(wusbhc->dev, "ignoring DN_Alive from unconnected device %02x\n",
510 srcaddr);
511 } else {
512 wusb_dev->entry_ts = jiffies;
513 __wusbhc_keep_alive(wusbhc);
514 }
532 mutex_unlock(&wusbhc->mutex); 515 mutex_unlock(&wusbhc->mutex);
533} 516}
534 517
@@ -582,14 +565,22 @@ static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc,
582 * 565 *
583 * @wusbhc shall be referenced and unlocked 566 * @wusbhc shall be referenced and unlocked
584 */ 567 */
585static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) 568static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, u8 srcaddr)
586{ 569{
587 struct device *dev = wusbhc->dev; 570 struct device *dev = wusbhc->dev;
588 571 struct wusb_dev *wusb_dev;
589 dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n", wusb_dev->addr);
590 572
591 mutex_lock(&wusbhc->mutex); 573 mutex_lock(&wusbhc->mutex);
592 __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, wusb_dev->port_idx)); 574 wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
575 if (wusb_dev == NULL) {
576 dev_dbg(dev, "ignoring DN DISCONNECT from unconnected device %02x\n",
577 srcaddr);
578 } else {
579 dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n",
580 wusb_dev->addr);
581 __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc,
582 wusb_dev->port_idx));
583 }
593 mutex_unlock(&wusbhc->mutex); 584 mutex_unlock(&wusbhc->mutex);
594} 585}
595 586
@@ -611,30 +602,21 @@ void wusbhc_handle_dn(struct wusbhc *wusbhc, u8 srcaddr,
611 struct wusb_dn_hdr *dn_hdr, size_t size) 602 struct wusb_dn_hdr *dn_hdr, size_t size)
612{ 603{
613 struct device *dev = wusbhc->dev; 604 struct device *dev = wusbhc->dev;
614 struct wusb_dev *wusb_dev;
615 605
616 if (size < sizeof(struct wusb_dn_hdr)) { 606 if (size < sizeof(struct wusb_dn_hdr)) {
617 dev_err(dev, "DN data shorter than DN header (%d < %d)\n", 607 dev_err(dev, "DN data shorter than DN header (%d < %d)\n",
618 (int)size, (int)sizeof(struct wusb_dn_hdr)); 608 (int)size, (int)sizeof(struct wusb_dn_hdr));
619 return; 609 return;
620 } 610 }
621
622 wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
623 if (wusb_dev == NULL && dn_hdr->bType != WUSB_DN_CONNECT) {
624 dev_dbg(dev, "ignoring DN %d from unconnected device %02x\n",
625 dn_hdr->bType, srcaddr);
626 return;
627 }
628
629 switch (dn_hdr->bType) { 611 switch (dn_hdr->bType) {
630 case WUSB_DN_CONNECT: 612 case WUSB_DN_CONNECT:
631 wusbhc_handle_dn_connect(wusbhc, dn_hdr, size); 613 wusbhc_handle_dn_connect(wusbhc, dn_hdr, size);
632 break; 614 break;
633 case WUSB_DN_ALIVE: 615 case WUSB_DN_ALIVE:
634 wusbhc_handle_dn_alive(wusbhc, wusb_dev); 616 wusbhc_handle_dn_alive(wusbhc, srcaddr);
635 break; 617 break;
636 case WUSB_DN_DISCONNECT: 618 case WUSB_DN_DISCONNECT:
637 wusbhc_handle_dn_disconnect(wusbhc, wusb_dev); 619 wusbhc_handle_dn_disconnect(wusbhc, srcaddr);
638 break; 620 break;
639 case WUSB_DN_MASAVAILCHANGED: 621 case WUSB_DN_MASAVAILCHANGED:
640 case WUSB_DN_RWAKE: 622 case WUSB_DN_RWAKE:
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index dd88441c8f78..4c40d0dbf53d 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -29,19 +29,16 @@
29#include <linux/export.h> 29#include <linux/export.h>
30#include "wusbhc.h" 30#include "wusbhc.h"
31 31
32static void wusbhc_set_gtk_callback(struct urb *urb); 32static void wusbhc_gtk_rekey_work(struct work_struct *work);
33static void wusbhc_gtk_rekey_done_work(struct work_struct *work);
34 33
35int wusbhc_sec_create(struct wusbhc *wusbhc) 34int wusbhc_sec_create(struct wusbhc *wusbhc)
36{ 35{
37 wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + sizeof(wusbhc->gtk.data); 36 wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + sizeof(wusbhc->gtk.data);
38 wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY; 37 wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY;
39 wusbhc->gtk.descr.bReserved = 0; 38 wusbhc->gtk.descr.bReserved = 0;
39 wusbhc->gtk_index = 0;
40 40
41 wusbhc->gtk_index = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK, 41 INIT_WORK(&wusbhc->gtk_rekey_work, wusbhc_gtk_rekey_work);
42 WUSB_KEY_INDEX_ORIGINATOR_HOST);
43
44 INIT_WORK(&wusbhc->gtk_rekey_done_work, wusbhc_gtk_rekey_done_work);
45 42
46 return 0; 43 return 0;
47} 44}
@@ -113,7 +110,7 @@ int wusbhc_sec_start(struct wusbhc *wusbhc)
113 wusbhc_generate_gtk(wusbhc); 110 wusbhc_generate_gtk(wusbhc);
114 111
115 result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, 112 result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid,
116 &wusbhc->gtk.descr.bKeyData, key_size); 113 &wusbhc->gtk.descr.bKeyData, key_size);
117 if (result < 0) 114 if (result < 0)
118 dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n", 115 dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n",
119 result); 116 result);
@@ -129,7 +126,7 @@ int wusbhc_sec_start(struct wusbhc *wusbhc)
129 */ 126 */
130void wusbhc_sec_stop(struct wusbhc *wusbhc) 127void wusbhc_sec_stop(struct wusbhc *wusbhc)
131{ 128{
132 cancel_work_sync(&wusbhc->gtk_rekey_done_work); 129 cancel_work_sync(&wusbhc->gtk_rekey_work);
133} 130}
134 131
135 132
@@ -185,12 +182,14 @@ static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value)
185static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) 182static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
186{ 183{
187 struct usb_device *usb_dev = wusb_dev->usb_dev; 184 struct usb_device *usb_dev = wusb_dev->usb_dev;
185 u8 key_index = wusb_key_index(wusbhc->gtk_index,
186 WUSB_KEY_INDEX_TYPE_GTK, WUSB_KEY_INDEX_ORIGINATOR_HOST);
188 187
189 return usb_control_msg( 188 return usb_control_msg(
190 usb_dev, usb_sndctrlpipe(usb_dev, 0), 189 usb_dev, usb_sndctrlpipe(usb_dev, 0),
191 USB_REQ_SET_DESCRIPTOR, 190 USB_REQ_SET_DESCRIPTOR,
192 USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, 191 USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
193 USB_DT_KEY << 8 | wusbhc->gtk_index, 0, 192 USB_DT_KEY << 8 | key_index, 0,
194 &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength, 193 &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
195 1000); 194 1000);
196} 195}
@@ -520,24 +519,55 @@ error_kzalloc:
520 * Once all connected and authenticated devices have received the new 519 * Once all connected and authenticated devices have received the new
521 * GTK, switch the host to using it. 520 * GTK, switch the host to using it.
522 */ 521 */
523static void wusbhc_gtk_rekey_done_work(struct work_struct *work) 522static void wusbhc_gtk_rekey_work(struct work_struct *work)
524{ 523{
525 struct wusbhc *wusbhc = container_of(work, struct wusbhc, gtk_rekey_done_work); 524 struct wusbhc *wusbhc = container_of(work,
525 struct wusbhc, gtk_rekey_work);
526 size_t key_size = sizeof(wusbhc->gtk.data); 526 size_t key_size = sizeof(wusbhc->gtk.data);
527 int port_idx;
528 struct wusb_dev *wusb_dev, *wusb_dev_next;
529 LIST_HEAD(rekey_list);
527 530
528 mutex_lock(&wusbhc->mutex); 531 mutex_lock(&wusbhc->mutex);
532 /* generate the new key */
533 wusbhc_generate_gtk(wusbhc);
534 /* roll the gtk index. */
535 wusbhc->gtk_index = (wusbhc->gtk_index + 1) % (WUSB_KEY_INDEX_MAX + 1);
536 /*
537 * Save all connected devices on a list while holding wusbhc->mutex and
538 * take a reference to each one. Then submit the set key request to
539 * them after releasing the lock in order to avoid a deadlock.
540 */
541 for (port_idx = 0; port_idx < wusbhc->ports_max; port_idx++) {
542 wusb_dev = wusbhc->port[port_idx].wusb_dev;
543 if (!wusb_dev || !wusb_dev->usb_dev
544 || !wusb_dev->usb_dev->authenticated)
545 continue;
529 546
530 if (--wusbhc->pending_set_gtks == 0) 547 wusb_dev_get(wusb_dev);
531 wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size); 548 list_add_tail(&wusb_dev->rekey_node, &rekey_list);
532 549 }
533 mutex_unlock(&wusbhc->mutex); 550 mutex_unlock(&wusbhc->mutex);
534}
535 551
536static void wusbhc_set_gtk_callback(struct urb *urb) 552 /* Submit the rekey requests without holding wusbhc->mutex. */
537{ 553 list_for_each_entry_safe(wusb_dev, wusb_dev_next, &rekey_list,
538 struct wusbhc *wusbhc = urb->context; 554 rekey_node) {
555 list_del_init(&wusb_dev->rekey_node);
556 dev_dbg(&wusb_dev->usb_dev->dev, "%s: rekey device at port %d\n",
557 __func__, wusb_dev->port_idx);
558
559 if (wusb_dev_set_gtk(wusbhc, wusb_dev) < 0) {
560 dev_err(&wusb_dev->usb_dev->dev, "%s: rekey device at port %d failed\n",
561 __func__, wusb_dev->port_idx);
562 }
563 wusb_dev_put(wusb_dev);
564 }
539 565
540 queue_work(wusbd, &wusbhc->gtk_rekey_done_work); 566 /* Switch the host controller to use the new GTK. */
567 mutex_lock(&wusbhc->mutex);
568 wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid,
569 &wusbhc->gtk.descr.bKeyData, key_size);
570 mutex_unlock(&wusbhc->mutex);
541} 571}
542 572
543/** 573/**
@@ -553,26 +583,12 @@ static void wusbhc_set_gtk_callback(struct urb *urb)
553 */ 583 */
554void wusbhc_gtk_rekey(struct wusbhc *wusbhc) 584void wusbhc_gtk_rekey(struct wusbhc *wusbhc)
555{ 585{
556 static const size_t key_size = sizeof(wusbhc->gtk.data); 586 /*
557 int p; 587 * We need to submit a URB to the downstream WUSB devices in order to
558 588 * change the group key. This can't be done while holding the
559 wusbhc_generate_gtk(wusbhc); 589 * wusbhc->mutex since that is also taken in the urb_enqueue routine
560 590 * and will cause a deadlock. Instead, queue a work item to do
561 for (p = 0; p < wusbhc->ports_max; p++) { 591 * it when the lock is not held
562 struct wusb_dev *wusb_dev; 592 */
563 593 queue_work(wusbd, &wusbhc->gtk_rekey_work);
564 wusb_dev = wusbhc->port[p].wusb_dev;
565 if (!wusb_dev || !wusb_dev->usb_dev || !wusb_dev->usb_dev->authenticated)
566 continue;
567
568 usb_fill_control_urb(wusb_dev->set_gtk_urb, wusb_dev->usb_dev,
569 usb_sndctrlpipe(wusb_dev->usb_dev, 0),
570 (void *)wusb_dev->set_gtk_req,
571 &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
572 wusbhc_set_gtk_callback, wusbhc);
573 if (usb_submit_urb(wusb_dev->set_gtk_urb, GFP_KERNEL) == 0)
574 wusbhc->pending_set_gtks++;
575 }
576 if (wusbhc->pending_set_gtks == 0)
577 wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size);
578} 594}
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h
index 711b1952b114..6bd3b819a6b5 100644
--- a/drivers/usb/wusbcore/wusbhc.h
+++ b/drivers/usb/wusbcore/wusbhc.h
@@ -97,6 +97,7 @@ struct wusb_dev {
97 struct kref refcnt; 97 struct kref refcnt;
98 struct wusbhc *wusbhc; 98 struct wusbhc *wusbhc;
99 struct list_head cack_node; /* Connect-Ack list */ 99 struct list_head cack_node; /* Connect-Ack list */
100 struct list_head rekey_node; /* GTK rekey list */
100 u8 port_idx; 101 u8 port_idx;
101 u8 addr; 102 u8 addr;
102 u8 beacon_type:4; 103 u8 beacon_type:4;
@@ -107,8 +108,6 @@ struct wusb_dev {
107 struct usb_wireless_cap_descriptor *wusb_cap_descr; 108 struct usb_wireless_cap_descriptor *wusb_cap_descr;
108 struct uwb_mas_bm availability; 109 struct uwb_mas_bm availability;
109 struct work_struct devconnect_acked_work; 110 struct work_struct devconnect_acked_work;
110 struct urb *set_gtk_urb;
111 struct usb_ctrlrequest *set_gtk_req;
112 struct usb_device *usb_dev; 111 struct usb_device *usb_dev;
113}; 112};
114 113
@@ -296,8 +295,7 @@ struct wusbhc {
296 } __attribute__((packed)) gtk; 295 } __attribute__((packed)) gtk;
297 u8 gtk_index; 296 u8 gtk_index;
298 u32 gtk_tkid; 297 u32 gtk_tkid;
299 struct work_struct gtk_rekey_done_work; 298 struct work_struct gtk_rekey_work;
300 int pending_set_gtks;
301 299
302 struct usb_encryption_descriptor *ccm1_etd; 300 struct usb_encryption_descriptor *ccm1_etd;
303}; 301};
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index e663921eebb6..f175629513ed 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -2168,15 +2168,15 @@ static int tcm_vhost_register_configfs(void)
2168 /* 2168 /*
2169 * Setup default attribute lists for various fabric->tf_cit_tmpl 2169 * Setup default attribute lists for various fabric->tf_cit_tmpl
2170 */ 2170 */
2171 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs; 2171 fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
2172 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs; 2172 fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
2173 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; 2173 fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
2174 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; 2174 fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
2175 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; 2175 fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
2176 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; 2176 fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2177 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 2177 fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2178 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 2178 fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2179 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; 2179 fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2180 /* 2180 /*
2181 * Register the fabric for use within TCM 2181 * Register the fabric for use within TCM
2182 */ 2182 */
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 8521051cf946..cd961622f9c1 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -131,6 +131,7 @@ static const struct platform_device_id atmel_lcdfb_devtypes[] = {
131 /* terminator */ 131 /* terminator */
132 } 132 }
133}; 133};
134MODULE_DEVICE_TABLE(platform, atmel_lcdfb_devtypes);
134 135
135static struct atmel_lcdfb_config * 136static struct atmel_lcdfb_config *
136atmel_lcdfb_get_config(struct platform_device *pdev) 137atmel_lcdfb_get_config(struct platform_device *pdev)
diff --git a/drivers/video/kyro/fbdev.c b/drivers/video/kyro/fbdev.c
index 50c857477e4f..65041e15fd59 100644
--- a/drivers/video/kyro/fbdev.c
+++ b/drivers/video/kyro/fbdev.c
@@ -624,15 +624,15 @@ static int kyrofb_ioctl(struct fb_info *info,
624 return -EINVAL; 624 return -EINVAL;
625 } 625 }
626 case KYRO_IOCTL_UVSTRIDE: 626 case KYRO_IOCTL_UVSTRIDE:
627 if (copy_to_user(argp, &deviceInfo.ulOverlayUVStride, sizeof(unsigned long))) 627 if (copy_to_user(argp, &deviceInfo.ulOverlayUVStride, sizeof(deviceInfo.ulOverlayUVStride)))
628 return -EFAULT; 628 return -EFAULT;
629 break; 629 break;
630 case KYRO_IOCTL_STRIDE: 630 case KYRO_IOCTL_STRIDE:
631 if (copy_to_user(argp, &deviceInfo.ulOverlayStride, sizeof(unsigned long))) 631 if (copy_to_user(argp, &deviceInfo.ulOverlayStride, sizeof(deviceInfo.ulOverlayStride)))
632 return -EFAULT; 632 return -EFAULT;
633 break; 633 break;
634 case KYRO_IOCTL_OVERLAY_OFFSET: 634 case KYRO_IOCTL_OVERLAY_OFFSET:
635 if (copy_to_user(argp, &deviceInfo.ulOverlayOffset, sizeof(unsigned long))) 635 if (copy_to_user(argp, &deviceInfo.ulOverlayOffset, sizeof(deviceInfo.ulOverlayOffset)))
636 return -EFAULT; 636 return -EFAULT;
637 break; 637 break;
638 } 638 }
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 9dbea2223401..7d44d669d5b6 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -91,6 +91,15 @@ extern boot_infos_t *boot_infos;
91#define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN 0x6cd4 91#define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN 0x6cd4
92#define AVIVO_DC_LUTB_WHITE_OFFSET_RED 0x6cd8 92#define AVIVO_DC_LUTB_WHITE_OFFSET_RED 0x6cd8
93 93
94#define FB_RIGHT_POS(p, bpp) (fb_be_math(p) ? 0 : (32 - (bpp)))
95
96static inline u32 offb_cmap_byteswap(struct fb_info *info, u32 value)
97{
98 u32 bpp = info->var.bits_per_pixel;
99
100 return cpu_to_be32(value) >> FB_RIGHT_POS(info, bpp);
101}
102
94 /* 103 /*
95 * Set a single color register. The values supplied are already 104 * Set a single color register. The values supplied are already
96 * rounded down to the hardware's capabilities (according to the 105 * rounded down to the hardware's capabilities (according to the
@@ -120,7 +129,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
120 mask <<= info->var.transp.offset; 129 mask <<= info->var.transp.offset;
121 value |= mask; 130 value |= mask;
122 } 131 }
123 pal[regno] = value; 132 pal[regno] = offb_cmap_byteswap(info, value);
124 return 0; 133 return 0;
125 } 134 }
126 135
@@ -301,7 +310,7 @@ static struct fb_ops offb_ops = {
301static void __iomem *offb_map_reg(struct device_node *np, int index, 310static void __iomem *offb_map_reg(struct device_node *np, int index,
302 unsigned long offset, unsigned long size) 311 unsigned long offset, unsigned long size)
303{ 312{
304 const u32 *addrp; 313 const __be32 *addrp;
305 u64 asize, taddr; 314 u64 asize, taddr;
306 unsigned int flags; 315 unsigned int flags;
307 316
@@ -369,7 +378,11 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
369 } 378 }
370 of_node_put(pciparent); 379 of_node_put(pciparent);
371 } else if (dp && of_device_is_compatible(dp, "qemu,std-vga")) { 380 } else if (dp && of_device_is_compatible(dp, "qemu,std-vga")) {
372 const u32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 }; 381#ifdef __BIG_ENDIAN
382 const __be32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 };
383#else
384 const __be32 io_of_addr[3] = { 0x00000001, 0x0, 0x0 };
385#endif
373 u64 io_addr = of_translate_address(dp, io_of_addr); 386 u64 io_addr = of_translate_address(dp, io_of_addr);
374 if (io_addr != OF_BAD_ADDR) { 387 if (io_addr != OF_BAD_ADDR) {
375 par->cmap_adr = ioremap(io_addr + 0x3c8, 2); 388 par->cmap_adr = ioremap(io_addr + 0x3c8, 2);
@@ -535,7 +548,7 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
535 unsigned int flags, rsize, addr_prop = 0; 548 unsigned int flags, rsize, addr_prop = 0;
536 unsigned long max_size = 0; 549 unsigned long max_size = 0;
537 u64 rstart, address = OF_BAD_ADDR; 550 u64 rstart, address = OF_BAD_ADDR;
538 const u32 *pp, *addrp, *up; 551 const __be32 *pp, *addrp, *up;
539 u64 asize; 552 u64 asize;
540 int foreign_endian = 0; 553 int foreign_endian = 0;
541 554
@@ -551,25 +564,25 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
551 if (pp == NULL) 564 if (pp == NULL)
552 pp = of_get_property(dp, "depth", &len); 565 pp = of_get_property(dp, "depth", &len);
553 if (pp && len == sizeof(u32)) 566 if (pp && len == sizeof(u32))
554 depth = *pp; 567 depth = be32_to_cpup(pp);
555 568
556 pp = of_get_property(dp, "linux,bootx-width", &len); 569 pp = of_get_property(dp, "linux,bootx-width", &len);
557 if (pp == NULL) 570 if (pp == NULL)
558 pp = of_get_property(dp, "width", &len); 571 pp = of_get_property(dp, "width", &len);
559 if (pp && len == sizeof(u32)) 572 if (pp && len == sizeof(u32))
560 width = *pp; 573 width = be32_to_cpup(pp);
561 574
562 pp = of_get_property(dp, "linux,bootx-height", &len); 575 pp = of_get_property(dp, "linux,bootx-height", &len);
563 if (pp == NULL) 576 if (pp == NULL)
564 pp = of_get_property(dp, "height", &len); 577 pp = of_get_property(dp, "height", &len);
565 if (pp && len == sizeof(u32)) 578 if (pp && len == sizeof(u32))
566 height = *pp; 579 height = be32_to_cpup(pp);
567 580
568 pp = of_get_property(dp, "linux,bootx-linebytes", &len); 581 pp = of_get_property(dp, "linux,bootx-linebytes", &len);
569 if (pp == NULL) 582 if (pp == NULL)
570 pp = of_get_property(dp, "linebytes", &len); 583 pp = of_get_property(dp, "linebytes", &len);
571 if (pp && len == sizeof(u32) && (*pp != 0xffffffffu)) 584 if (pp && len == sizeof(u32) && (*pp != 0xffffffffu))
572 pitch = *pp; 585 pitch = be32_to_cpup(pp);
573 else 586 else
574 pitch = width * ((depth + 7) / 8); 587 pitch = width * ((depth + 7) / 8);
575 588
diff --git a/drivers/video/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
index e6d56f714ae4..d94f35dbd536 100644
--- a/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
+++ b/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
@@ -526,6 +526,8 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
526 struct omap_dss_device *in = ddata->in; 526 struct omap_dss_device *in = ddata->in;
527 int r; 527 int r;
528 528
529 mutex_lock(&ddata->mutex);
530
529 dev_dbg(&ddata->spi->dev, "%s\n", __func__); 531 dev_dbg(&ddata->spi->dev, "%s\n", __func__);
530 532
531 in->ops.sdi->set_timings(in, &ddata->videomode); 533 in->ops.sdi->set_timings(in, &ddata->videomode);
@@ -614,10 +616,7 @@ static int acx565akm_enable(struct omap_dss_device *dssdev)
614 if (omapdss_device_is_enabled(dssdev)) 616 if (omapdss_device_is_enabled(dssdev))
615 return 0; 617 return 0;
616 618
617 mutex_lock(&ddata->mutex);
618 r = acx565akm_panel_power_on(dssdev); 619 r = acx565akm_panel_power_on(dssdev);
619 mutex_unlock(&ddata->mutex);
620
621 if (r) 620 if (r)
622 return r; 621 return r;
623 622
diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/sh_mobile_meram.c
index e0f098562a74..a297de5cc859 100644
--- a/drivers/video/sh_mobile_meram.c
+++ b/drivers/video/sh_mobile_meram.c
@@ -569,6 +569,7 @@ EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_update);
569 * Power management 569 * Power management
570 */ 570 */
571 571
572#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_RUNTIME)
572static int sh_mobile_meram_suspend(struct device *dev) 573static int sh_mobile_meram_suspend(struct device *dev)
573{ 574{
574 struct platform_device *pdev = to_platform_device(dev); 575 struct platform_device *pdev = to_platform_device(dev);
@@ -611,6 +612,7 @@ static int sh_mobile_meram_resume(struct device *dev)
611 meram_write_reg(priv->base, common_regs[i], priv->regs[i]); 612 meram_write_reg(priv->base, common_regs[i], priv->regs[i]);
612 return 0; 613 return 0;
613} 614}
615#endif /* CONFIG_PM_SLEEP || CONFIG_PM_RUNTIME */
614 616
615static UNIVERSAL_DEV_PM_OPS(sh_mobile_meram_dev_pm_ops, 617static UNIVERSAL_DEV_PM_OPS(sh_mobile_meram_dev_pm_ops,
616 sh_mobile_meram_suspend, 618 sh_mobile_meram_suspend,
diff --git a/drivers/video/vt8500lcdfb.c b/drivers/video/vt8500lcdfb.c
index b30e5a439d1f..a8f2b280f796 100644
--- a/drivers/video/vt8500lcdfb.c
+++ b/drivers/video/vt8500lcdfb.c
@@ -293,8 +293,7 @@ static int vt8500lcd_probe(struct platform_device *pdev)
293 + sizeof(u32) * 16, GFP_KERNEL); 293 + sizeof(u32) * 16, GFP_KERNEL);
294 if (!fbi) { 294 if (!fbi) {
295 dev_err(&pdev->dev, "Failed to initialize framebuffer device\n"); 295 dev_err(&pdev->dev, "Failed to initialize framebuffer device\n");
296 ret = -ENOMEM; 296 return -ENOMEM;
297 goto failed;
298 } 297 }
299 298
300 strcpy(fbi->fb.fix.id, "VT8500 LCD"); 299 strcpy(fbi->fb.fix.id, "VT8500 LCD");
@@ -327,15 +326,13 @@ static int vt8500lcd_probe(struct platform_device *pdev)
327 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 326 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
328 if (res == NULL) { 327 if (res == NULL) {
329 dev_err(&pdev->dev, "no I/O memory resource defined\n"); 328 dev_err(&pdev->dev, "no I/O memory resource defined\n");
330 ret = -ENODEV; 329 return -ENODEV;
331 goto failed_fbi;
332 } 330 }
333 331
334 res = request_mem_region(res->start, resource_size(res), "vt8500lcd"); 332 res = request_mem_region(res->start, resource_size(res), "vt8500lcd");
335 if (res == NULL) { 333 if (res == NULL) {
336 dev_err(&pdev->dev, "failed to request I/O memory\n"); 334 dev_err(&pdev->dev, "failed to request I/O memory\n");
337 ret = -EBUSY; 335 return -EBUSY;
338 goto failed_fbi;
339 } 336 }
340 337
341 fbi->regbase = ioremap(res->start, resource_size(res)); 338 fbi->regbase = ioremap(res->start, resource_size(res));
@@ -346,17 +343,19 @@ static int vt8500lcd_probe(struct platform_device *pdev)
346 } 343 }
347 344
348 disp_timing = of_get_display_timings(pdev->dev.of_node); 345 disp_timing = of_get_display_timings(pdev->dev.of_node);
349 if (!disp_timing) 346 if (!disp_timing) {
350 return -EINVAL; 347 ret = -EINVAL;
348 goto failed_free_io;
349 }
351 350
352 ret = of_get_fb_videomode(pdev->dev.of_node, &of_mode, 351 ret = of_get_fb_videomode(pdev->dev.of_node, &of_mode,
353 OF_USE_NATIVE_MODE); 352 OF_USE_NATIVE_MODE);
354 if (ret) 353 if (ret)
355 return ret; 354 goto failed_free_io;
356 355
357 ret = of_property_read_u32(pdev->dev.of_node, "bits-per-pixel", &bpp); 356 ret = of_property_read_u32(pdev->dev.of_node, "bits-per-pixel", &bpp);
358 if (ret) 357 if (ret)
359 return ret; 358 goto failed_free_io;
360 359
361 /* try allocating the framebuffer */ 360 /* try allocating the framebuffer */
362 fb_mem_len = of_mode.xres * of_mode.yres * 2 * (bpp / 8); 361 fb_mem_len = of_mode.xres * of_mode.yres * 2 * (bpp / 8);
@@ -364,7 +363,8 @@ static int vt8500lcd_probe(struct platform_device *pdev)
364 GFP_KERNEL); 363 GFP_KERNEL);
365 if (!fb_mem_virt) { 364 if (!fb_mem_virt) {
366 pr_err("%s: Failed to allocate framebuffer\n", __func__); 365 pr_err("%s: Failed to allocate framebuffer\n", __func__);
367 return -ENOMEM; 366 ret = -ENOMEM;
367 goto failed_free_io;
368 } 368 }
369 369
370 fbi->fb.fix.smem_start = fb_mem_phys; 370 fbi->fb.fix.smem_start = fb_mem_phys;
@@ -447,9 +447,6 @@ failed_free_io:
447 iounmap(fbi->regbase); 447 iounmap(fbi->regbase);
448failed_free_res: 448failed_free_res:
449 release_mem_region(res->start, resource_size(res)); 449 release_mem_region(res->start, resource_size(res));
450failed_fbi:
451 kfree(fbi);
452failed:
453 return ret; 450 return ret;
454} 451}
455 452
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index a6a2cebb2587..cafa973c43be 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -19,7 +19,6 @@
19#include <linux/watchdog.h> 19#include <linux/watchdog.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/of_address.h> 21#include <linux/of_address.h>
22#include <linux/miscdevice.h>
23 22
24#define PM_RSTC 0x1c 23#define PM_RSTC 0x1c
25#define PM_WDOG 0x24 24#define PM_WDOG 0x24
diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c
index 833e81311848..d1d07f2f69df 100644
--- a/drivers/watchdog/ep93xx_wdt.c
+++ b/drivers/watchdog/ep93xx_wdt.c
@@ -28,7 +28,6 @@
28 28
29#include <linux/platform_device.h> 29#include <linux/platform_device.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/miscdevice.h>
32#include <linux/watchdog.h> 31#include <linux/watchdog.h>
33#include <linux/timer.h> 32#include <linux/timer.h>
34#include <linux/io.h> 33#include <linux/io.h>
diff --git a/drivers/watchdog/ie6xx_wdt.c b/drivers/watchdog/ie6xx_wdt.c
index 70a240297c6d..07f88f54e5c0 100644
--- a/drivers/watchdog/ie6xx_wdt.c
+++ b/drivers/watchdog/ie6xx_wdt.c
@@ -28,7 +28,6 @@
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/types.h> 29#include <linux/types.h>
30#include <linux/watchdog.h> 30#include <linux/watchdog.h>
31#include <linux/miscdevice.h>
32#include <linux/seq_file.h> 31#include <linux/seq_file.h>
33#include <linux/debugfs.h> 32#include <linux/debugfs.h>
34#include <linux/uaccess.h> 33#include <linux/uaccess.h>
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index 2de486a7eea1..3aa50cfa335f 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -17,7 +17,6 @@
17#include <linux/moduleparam.h> 17#include <linux/moduleparam.h>
18#include <linux/types.h> 18#include <linux/types.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/miscdevice.h>
21#include <linux/watchdog.h> 20#include <linux/watchdog.h>
22#include <linux/init.h> 21#include <linux/init.h>
23#include <linux/platform_device.h> 22#include <linux/platform_device.h>
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
index a1a3638c579c..20dc73844737 100644
--- a/drivers/watchdog/kempld_wdt.c
+++ b/drivers/watchdog/kempld_wdt.c
@@ -26,7 +26,6 @@
26 26
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/moduleparam.h> 28#include <linux/moduleparam.h>
29#include <linux/miscdevice.h>
30#include <linux/uaccess.h> 29#include <linux/uaccess.h>
31#include <linux/watchdog.h> 30#include <linux/watchdog.h>
32#include <linux/platform_device.h> 31#include <linux/platform_device.h>
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index 6d4f3998e1f6..bdb3f4a5b27c 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -19,7 +19,6 @@
19#include <linux/moduleparam.h> 19#include <linux/moduleparam.h>
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/miscdevice.h>
23#include <linux/watchdog.h> 22#include <linux/watchdog.h>
24#include <linux/init.h> 23#include <linux/init.h>
25#include <linux/bitops.h> 24#include <linux/bitops.h>
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 44edca66d564..f7722a424676 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -16,7 +16,6 @@
16#include <linux/moduleparam.h> 16#include <linux/moduleparam.h>
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/miscdevice.h>
20#include <linux/platform_device.h> 19#include <linux/platform_device.h>
21#include <linux/watchdog.h> 20#include <linux/watchdog.h>
22#include <linux/init.h> 21#include <linux/init.h>
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 1bdcc313e1d9..5bec20f5dc2d 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -23,7 +23,6 @@
23#include <linux/moduleparam.h> 23#include <linux/moduleparam.h>
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/miscdevice.h>
27#include <linux/watchdog.h> 26#include <linux/watchdog.h>
28#include <linux/init.h> 27#include <linux/init.h>
29#include <linux/platform_device.h> 28#include <linux/platform_device.h>
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index 53d37fea183e..d92c2d5859ce 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -16,7 +16,6 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/watchdog.h> 18#include <linux/watchdog.h>
19#include <linux/miscdevice.h>
20#include <linux/moduleparam.h> 19#include <linux/moduleparam.h>
21#include <linux/platform_device.h> 20#include <linux/platform_device.h>
22 21
diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c
index 3b9fff9dcf65..131193a7acdf 100644
--- a/drivers/watchdog/sc1200wdt.c
+++ b/drivers/watchdog/sc1200wdt.c
@@ -409,8 +409,9 @@ static int __init sc1200wdt_init(void)
409#if defined CONFIG_PNP 409#if defined CONFIG_PNP
410 /* now that the user has specified an IO port and we haven't detected 410 /* now that the user has specified an IO port and we haven't detected
411 * any devices, disable pnp support */ 411 * any devices, disable pnp support */
412 if (isapnp)
413 pnp_unregister_driver(&scl200wdt_pnp_driver);
412 isapnp = 0; 414 isapnp = 0;
413 pnp_unregister_driver(&scl200wdt_pnp_driver);
414#endif 415#endif
415 416
416 if (!request_region(io, io_len, SC1200_MODULE_NAME)) { 417 if (!request_region(io, io_len, SC1200_MODULE_NAME)) {
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index f9b8e06f3558..af3528f84d65 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -26,7 +26,6 @@
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/spinlock.h> 28#include <linux/spinlock.h>
29#include <linux/miscdevice.h>
30#include <linux/watchdog.h> 29#include <linux/watchdog.h>
31#include <linux/pm_runtime.h> 30#include <linux/pm_runtime.h>
32#include <linux/fs.h> 31#include <linux/fs.h>
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index ef2638fee4a8..c04a1aa158e2 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -42,7 +42,6 @@
42#include <linux/moduleparam.h> 42#include <linux/moduleparam.h>
43#include <linux/types.h> 43#include <linux/types.h>
44#include <linux/timer.h> 44#include <linux/timer.h>
45#include <linux/miscdevice.h>
46#include <linux/watchdog.h> 45#include <linux/watchdog.h>
47#include <linux/notifier.h> 46#include <linux/notifier.h>
48#include <linux/reboot.h> 47#include <linux/reboot.h>
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c
index d667f6b51d35..bb64ae3f47da 100644
--- a/drivers/watchdog/stmp3xxx_rtc_wdt.c
+++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c
@@ -12,7 +12,6 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/miscdevice.h>
16#include <linux/watchdog.h> 15#include <linux/watchdog.h>
17#include <linux/platform_device.h> 16#include <linux/platform_device.h>
18#include <linux/stmp3xxx_rtc_wdt.h> 17#include <linux/stmp3xxx_rtc_wdt.h>
diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c
index 0fd0e8ae62a8..6a447e321dd0 100644
--- a/drivers/watchdog/txx9wdt.c
+++ b/drivers/watchdog/txx9wdt.c
@@ -13,7 +13,6 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/miscdevice.h>
17#include <linux/watchdog.h> 16#include <linux/watchdog.h>
18#include <linux/init.h> 17#include <linux/init.h>
19#include <linux/platform_device.h> 18#include <linux/platform_device.h>
diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c
index e029b5768f2c..5aed9d7ad47e 100644
--- a/drivers/watchdog/ux500_wdt.c
+++ b/drivers/watchdog/ux500_wdt.c
@@ -12,7 +12,6 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
15#include <linux/miscdevice.h>
16#include <linux/err.h> 15#include <linux/err.h>
17#include <linux/uaccess.h> 16#include <linux/uaccess.h>
18#include <linux/watchdog.h> 17#include <linux/watchdog.h>
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 62ccf5424ba8..028387192b60 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -930,9 +930,10 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
930 ret = m2p_add_override(mfn, pages[i], kmap_ops ? 930 ret = m2p_add_override(mfn, pages[i], kmap_ops ?
931 &kmap_ops[i] : NULL); 931 &kmap_ops[i] : NULL);
932 if (ret) 932 if (ret)
933 return ret; 933 goto out;
934 } 934 }
935 935
936 out:
936 if (lazy) 937 if (lazy)
937 arch_leave_lazy_mmu_mode(); 938 arch_leave_lazy_mmu_mode();
938 939
@@ -969,9 +970,10 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
969 ret = m2p_remove_override(pages[i], kmap_ops ? 970 ret = m2p_remove_override(pages[i], kmap_ops ?
970 &kmap_ops[i] : NULL); 971 &kmap_ops[i] : NULL);
971 if (ret) 972 if (ret)
972 return ret; 973 goto out;
973 } 974 }
974 975
976 out:
975 if (lazy) 977 if (lazy)
976 arch_leave_lazy_mmu_mode(); 978 arch_leave_lazy_mmu_mode();
977 979
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index d15f6e80479f..188825122aae 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -59,12 +59,12 @@ static int xen_add_device(struct device *dev)
59 add.flags = XEN_PCI_DEV_EXTFN; 59 add.flags = XEN_PCI_DEV_EXTFN;
60 60
61#ifdef CONFIG_ACPI 61#ifdef CONFIG_ACPI
62 handle = DEVICE_ACPI_HANDLE(&pci_dev->dev); 62 handle = ACPI_HANDLE(&pci_dev->dev);
63 if (!handle && pci_dev->bus->bridge) 63 if (!handle && pci_dev->bus->bridge)
64 handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge); 64 handle = ACPI_HANDLE(pci_dev->bus->bridge);
65#ifdef CONFIG_PCI_IOV 65#ifdef CONFIG_PCI_IOV
66 if (!handle && pci_dev->is_virtfn) 66 if (!handle && pci_dev->is_virtfn)
67 handle = DEVICE_ACPI_HANDLE(physfn->bus->bridge); 67 handle = ACPI_HANDLE(physfn->bus->bridge);
68#endif 68#endif
69 if (handle) { 69 if (handle) {
70 acpi_status status; 70 acpi_status status;
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index a224bc74b6b9..1eac0731c349 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -555,6 +555,11 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
555 sg_dma_len(sgl) = 0; 555 sg_dma_len(sgl) = 0;
556 return 0; 556 return 0;
557 } 557 }
558 xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
559 map & ~PAGE_MASK,
560 sg->length,
561 dir,
562 attrs);
558 sg->dma_address = xen_phys_to_bus(map); 563 sg->dma_address = xen_phys_to_bus(map);
559 } else { 564 } else {
560 /* we are not interested in the dma_addr returned by 565 /* we are not interested in the dma_addr returned by
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index f039b104a98e..b03dd23feda8 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -43,23 +43,6 @@
43#include "fid.h" 43#include "fid.h"
44 44
45/** 45/**
46 * v9fs_dentry_delete - called when dentry refcount equals 0
47 * @dentry: dentry in question
48 *
49 * By returning 1 here we should remove cacheing of unused
50 * dentry components.
51 *
52 */
53
54static int v9fs_dentry_delete(const struct dentry *dentry)
55{
56 p9_debug(P9_DEBUG_VFS, " dentry: %s (%p)\n",
57 dentry->d_name.name, dentry);
58
59 return 1;
60}
61
62/**
63 * v9fs_cached_dentry_delete - called when dentry refcount equals 0 46 * v9fs_cached_dentry_delete - called when dentry refcount equals 0
64 * @dentry: dentry in question 47 * @dentry: dentry in question
65 * 48 *
@@ -134,6 +117,6 @@ const struct dentry_operations v9fs_cached_dentry_operations = {
134}; 117};
135 118
136const struct dentry_operations v9fs_dentry_operations = { 119const struct dentry_operations v9fs_dentry_operations = {
137 .d_delete = v9fs_dentry_delete, 120 .d_delete = always_delete_dentry,
138 .d_release = v9fs_dentry_release, 121 .d_release = v9fs_dentry_release,
139}; 122};
diff --git a/fs/affs/Changes b/fs/affs/Changes
index a29409c1ffe0..b41c2c9792ff 100644
--- a/fs/affs/Changes
+++ b/fs/affs/Changes
@@ -91,7 +91,7 @@ more 2.4 fixes: [Roman Zippel]
91Version 3.11 91Version 3.11
92------------ 92------------
93 93
94- Converted to use 2.3.x page cache [Dave Jones <dave@powertweak.com>] 94- Converted to use 2.3.x page cache [Dave Jones]
95- Corruption in truncate() bugfix [Ken Tyler <kent@werple.net.au>] 95- Corruption in truncate() bugfix [Ken Tyler <kent@werple.net.au>]
96 96
97Version 3.10 97Version 3.10
diff --git a/fs/aio.c b/fs/aio.c
index 823efcbb6ccd..6efb7f6cb22e 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -80,6 +80,8 @@ struct kioctx {
80 struct percpu_ref users; 80 struct percpu_ref users;
81 atomic_t dead; 81 atomic_t dead;
82 82
83 struct percpu_ref reqs;
84
83 unsigned long user_id; 85 unsigned long user_id;
84 86
85 struct __percpu kioctx_cpu *cpu; 87 struct __percpu kioctx_cpu *cpu;
@@ -107,7 +109,6 @@ struct kioctx {
107 struct page **ring_pages; 109 struct page **ring_pages;
108 long nr_pages; 110 long nr_pages;
109 111
110 struct rcu_head rcu_head;
111 struct work_struct free_work; 112 struct work_struct free_work;
112 113
113 struct { 114 struct {
@@ -250,8 +251,10 @@ static void aio_free_ring(struct kioctx *ctx)
250 251
251 put_aio_ring_file(ctx); 252 put_aio_ring_file(ctx);
252 253
253 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) 254 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
254 kfree(ctx->ring_pages); 255 kfree(ctx->ring_pages);
256 ctx->ring_pages = NULL;
257 }
255} 258}
256 259
257static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma) 260static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
@@ -364,8 +367,10 @@ static int aio_setup_ring(struct kioctx *ctx)
364 if (nr_pages > AIO_RING_PAGES) { 367 if (nr_pages > AIO_RING_PAGES) {
365 ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), 368 ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
366 GFP_KERNEL); 369 GFP_KERNEL);
367 if (!ctx->ring_pages) 370 if (!ctx->ring_pages) {
371 put_aio_ring_file(ctx);
368 return -ENOMEM; 372 return -ENOMEM;
373 }
369 } 374 }
370 375
371 ctx->mmap_size = nr_pages * PAGE_SIZE; 376 ctx->mmap_size = nr_pages * PAGE_SIZE;
@@ -463,26 +468,34 @@ static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb)
463 return cancel(kiocb); 468 return cancel(kiocb);
464} 469}
465 470
466static void free_ioctx_rcu(struct rcu_head *head) 471static void free_ioctx(struct work_struct *work)
467{ 472{
468 struct kioctx *ctx = container_of(head, struct kioctx, rcu_head); 473 struct kioctx *ctx = container_of(work, struct kioctx, free_work);
474
475 pr_debug("freeing %p\n", ctx);
469 476
477 aio_free_ring(ctx);
470 free_percpu(ctx->cpu); 478 free_percpu(ctx->cpu);
471 kmem_cache_free(kioctx_cachep, ctx); 479 kmem_cache_free(kioctx_cachep, ctx);
472} 480}
473 481
482static void free_ioctx_reqs(struct percpu_ref *ref)
483{
484 struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
485
486 INIT_WORK(&ctx->free_work, free_ioctx);
487 schedule_work(&ctx->free_work);
488}
489
474/* 490/*
475 * When this function runs, the kioctx has been removed from the "hash table" 491 * When this function runs, the kioctx has been removed from the "hash table"
476 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - 492 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
477 * now it's safe to cancel any that need to be. 493 * now it's safe to cancel any that need to be.
478 */ 494 */
479static void free_ioctx(struct work_struct *work) 495static void free_ioctx_users(struct percpu_ref *ref)
480{ 496{
481 struct kioctx *ctx = container_of(work, struct kioctx, free_work); 497 struct kioctx *ctx = container_of(ref, struct kioctx, users);
482 struct aio_ring *ring;
483 struct kiocb *req; 498 struct kiocb *req;
484 unsigned cpu, avail;
485 DEFINE_WAIT(wait);
486 499
487 spin_lock_irq(&ctx->ctx_lock); 500 spin_lock_irq(&ctx->ctx_lock);
488 501
@@ -496,54 +509,8 @@ static void free_ioctx(struct work_struct *work)
496 509
497 spin_unlock_irq(&ctx->ctx_lock); 510 spin_unlock_irq(&ctx->ctx_lock);
498 511
499 for_each_possible_cpu(cpu) { 512 percpu_ref_kill(&ctx->reqs);
500 struct kioctx_cpu *kcpu = per_cpu_ptr(ctx->cpu, cpu); 513 percpu_ref_put(&ctx->reqs);
501
502 atomic_add(kcpu->reqs_available, &ctx->reqs_available);
503 kcpu->reqs_available = 0;
504 }
505
506 while (1) {
507 prepare_to_wait(&ctx->wait, &wait, TASK_UNINTERRUPTIBLE);
508
509 ring = kmap_atomic(ctx->ring_pages[0]);
510 avail = (ring->head <= ring->tail)
511 ? ring->tail - ring->head
512 : ctx->nr_events - ring->head + ring->tail;
513
514 atomic_add(avail, &ctx->reqs_available);
515 ring->head = ring->tail;
516 kunmap_atomic(ring);
517
518 if (atomic_read(&ctx->reqs_available) >= ctx->nr_events - 1)
519 break;
520
521 schedule();
522 }
523 finish_wait(&ctx->wait, &wait);
524
525 WARN_ON(atomic_read(&ctx->reqs_available) > ctx->nr_events - 1);
526
527 aio_free_ring(ctx);
528
529 pr_debug("freeing %p\n", ctx);
530
531 /*
532 * Here the call_rcu() is between the wait_event() for reqs_active to
533 * hit 0, and freeing the ioctx.
534 *
535 * aio_complete() decrements reqs_active, but it has to touch the ioctx
536 * after to issue a wakeup so we use rcu.
537 */
538 call_rcu(&ctx->rcu_head, free_ioctx_rcu);
539}
540
541static void free_ioctx_ref(struct percpu_ref *ref)
542{
543 struct kioctx *ctx = container_of(ref, struct kioctx, users);
544
545 INIT_WORK(&ctx->free_work, free_ioctx);
546 schedule_work(&ctx->free_work);
547} 514}
548 515
549static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) 516static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
@@ -602,6 +569,16 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
602 } 569 }
603} 570}
604 571
572static void aio_nr_sub(unsigned nr)
573{
574 spin_lock(&aio_nr_lock);
575 if (WARN_ON(aio_nr - nr > aio_nr))
576 aio_nr = 0;
577 else
578 aio_nr -= nr;
579 spin_unlock(&aio_nr_lock);
580}
581
605/* ioctx_alloc 582/* ioctx_alloc
606 * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. 583 * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed.
607 */ 584 */
@@ -639,8 +616,11 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
639 616
640 ctx->max_reqs = nr_events; 617 ctx->max_reqs = nr_events;
641 618
642 if (percpu_ref_init(&ctx->users, free_ioctx_ref)) 619 if (percpu_ref_init(&ctx->users, free_ioctx_users))
643 goto out_freectx; 620 goto err;
621
622 if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs))
623 goto err;
644 624
645 spin_lock_init(&ctx->ctx_lock); 625 spin_lock_init(&ctx->ctx_lock);
646 spin_lock_init(&ctx->completion_lock); 626 spin_lock_init(&ctx->completion_lock);
@@ -651,10 +631,10 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
651 631
652 ctx->cpu = alloc_percpu(struct kioctx_cpu); 632 ctx->cpu = alloc_percpu(struct kioctx_cpu);
653 if (!ctx->cpu) 633 if (!ctx->cpu)
654 goto out_freeref; 634 goto err;
655 635
656 if (aio_setup_ring(ctx) < 0) 636 if (aio_setup_ring(ctx) < 0)
657 goto out_freepcpu; 637 goto err;
658 638
659 atomic_set(&ctx->reqs_available, ctx->nr_events - 1); 639 atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
660 ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4); 640 ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
@@ -666,7 +646,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
666 if (aio_nr + nr_events > (aio_max_nr * 2UL) || 646 if (aio_nr + nr_events > (aio_max_nr * 2UL) ||
667 aio_nr + nr_events < aio_nr) { 647 aio_nr + nr_events < aio_nr) {
668 spin_unlock(&aio_nr_lock); 648 spin_unlock(&aio_nr_lock);
669 goto out_cleanup; 649 err = -EAGAIN;
650 goto err_ctx;
670 } 651 }
671 aio_nr += ctx->max_reqs; 652 aio_nr += ctx->max_reqs;
672 spin_unlock(&aio_nr_lock); 653 spin_unlock(&aio_nr_lock);
@@ -675,23 +656,20 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
675 656
676 err = ioctx_add_table(ctx, mm); 657 err = ioctx_add_table(ctx, mm);
677 if (err) 658 if (err)
678 goto out_cleanup_put; 659 goto err_cleanup;
679 660
680 pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n", 661 pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
681 ctx, ctx->user_id, mm, ctx->nr_events); 662 ctx, ctx->user_id, mm, ctx->nr_events);
682 return ctx; 663 return ctx;
683 664
684out_cleanup_put: 665err_cleanup:
685 percpu_ref_put(&ctx->users); 666 aio_nr_sub(ctx->max_reqs);
686out_cleanup: 667err_ctx:
687 err = -EAGAIN;
688 aio_free_ring(ctx); 668 aio_free_ring(ctx);
689out_freepcpu: 669err:
690 free_percpu(ctx->cpu); 670 free_percpu(ctx->cpu);
691out_freeref: 671 free_percpu(ctx->reqs.pcpu_count);
692 free_percpu(ctx->users.pcpu_count); 672 free_percpu(ctx->users.pcpu_count);
693out_freectx:
694 put_aio_ring_file(ctx);
695 kmem_cache_free(kioctx_cachep, ctx); 673 kmem_cache_free(kioctx_cachep, ctx);
696 pr_debug("error allocating ioctx %d\n", err); 674 pr_debug("error allocating ioctx %d\n", err);
697 return ERR_PTR(err); 675 return ERR_PTR(err);
@@ -726,10 +704,7 @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
726 * -EAGAIN with no ioctxs actually in use (as far as userspace 704 * -EAGAIN with no ioctxs actually in use (as far as userspace
727 * could tell). 705 * could tell).
728 */ 706 */
729 spin_lock(&aio_nr_lock); 707 aio_nr_sub(ctx->max_reqs);
730 BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
731 aio_nr -= ctx->max_reqs;
732 spin_unlock(&aio_nr_lock);
733 708
734 if (ctx->mmap_size) 709 if (ctx->mmap_size)
735 vm_munmap(ctx->mmap_base, ctx->mmap_size); 710 vm_munmap(ctx->mmap_base, ctx->mmap_size);
@@ -861,6 +836,8 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
861 if (unlikely(!req)) 836 if (unlikely(!req))
862 goto out_put; 837 goto out_put;
863 838
839 percpu_ref_get(&ctx->reqs);
840
864 req->ki_ctx = ctx; 841 req->ki_ctx = ctx;
865 return req; 842 return req;
866out_put: 843out_put:
@@ -930,12 +907,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
930 return; 907 return;
931 } 908 }
932 909
933 /*
934 * Take rcu_read_lock() in case the kioctx is being destroyed, as we
935 * need to issue a wakeup after incrementing reqs_available.
936 */
937 rcu_read_lock();
938
939 if (iocb->ki_list.next) { 910 if (iocb->ki_list.next) {
940 unsigned long flags; 911 unsigned long flags;
941 912
@@ -1010,7 +981,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
1010 if (waitqueue_active(&ctx->wait)) 981 if (waitqueue_active(&ctx->wait))
1011 wake_up(&ctx->wait); 982 wake_up(&ctx->wait);
1012 983
1013 rcu_read_unlock(); 984 percpu_ref_put(&ctx->reqs);
1014} 985}
1015EXPORT_SYMBOL(aio_complete); 986EXPORT_SYMBOL(aio_complete);
1016 987
@@ -1421,6 +1392,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1421 return 0; 1392 return 0;
1422out_put_req: 1393out_put_req:
1423 put_reqs_available(ctx, 1); 1394 put_reqs_available(ctx, 1);
1395 percpu_ref_put(&ctx->reqs);
1424 kiocb_free(req); 1396 kiocb_free(req);
1425 return ret; 1397 return ret;
1426} 1398}
diff --git a/fs/bio.c b/fs/bio.c
index 2bdb4e25ee77..33d79a4eb92d 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -601,7 +601,7 @@ EXPORT_SYMBOL(bio_get_nr_vecs);
601 601
602static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page 602static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
603 *page, unsigned int len, unsigned int offset, 603 *page, unsigned int len, unsigned int offset,
604 unsigned short max_sectors) 604 unsigned int max_sectors)
605{ 605{
606 int retried_segments = 0; 606 int retried_segments = 0;
607 struct bio_vec *bvec; 607 struct bio_vec *bvec;
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index f9d5094e1029..aa976eced2d2 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -9,12 +9,17 @@ config BTRFS_FS
9 select XOR_BLOCKS 9 select XOR_BLOCKS
10 10
11 help 11 help
12 Btrfs is a new filesystem with extents, writable snapshotting, 12 Btrfs is a general purpose copy-on-write filesystem with extents,
13 support for multiple devices and many more features. 13 writable snapshotting, support for multiple devices and many more
14 features focused on fault tolerance, repair and easy administration.
14 15
15 Btrfs is highly experimental, and THE DISK FORMAT IS NOT YET 16 The filesystem disk format is no longer unstable, and it's not
16 FINALIZED. You should say N here unless you are interested in 17 expected to change unless there are strong reasons to do so. If there
17 testing Btrfs with non-critical data. 18 is a format change, file systems with a unchanged format will
19 continue to be mountable and usable by newer kernels.
20
21 For more information, please see the web pages at
22 http://btrfs.wiki.kernel.org.
18 23
19 To compile this file system support as a module, choose M here. The 24 To compile this file system support as a module, choose M here. The
20 module will be called btrfs. 25 module will be called btrfs.
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 8aec751fa464..c1e0b0caf9cc 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -495,6 +495,7 @@ static int __btrfs_start_workers(struct btrfs_workers *workers)
495 spin_lock_irq(&workers->lock); 495 spin_lock_irq(&workers->lock);
496 if (workers->stopping) { 496 if (workers->stopping) {
497 spin_unlock_irq(&workers->lock); 497 spin_unlock_irq(&workers->lock);
498 ret = -EINVAL;
498 goto fail_kthread; 499 goto fail_kthread;
499 } 500 }
500 list_add_tail(&worker->worker_list, &workers->idle_list); 501 list_add_tail(&worker->worker_list, &workers->idle_list);
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index e0aab4456974..131d82800b3a 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -77,6 +77,15 @@
77 * the integrity of (super)-block write requests, do not 77 * the integrity of (super)-block write requests, do not
78 * enable the config option BTRFS_FS_CHECK_INTEGRITY to 78 * enable the config option BTRFS_FS_CHECK_INTEGRITY to
79 * include and compile the integrity check tool. 79 * include and compile the integrity check tool.
80 *
81 * Expect millions of lines of information in the kernel log with an
82 * enabled check_int_print_mask. Therefore set LOG_BUF_SHIFT in the
83 * kernel config to at least 26 (which is 64MB). Usually the value is
84 * limited to 21 (which is 2MB) in init/Kconfig. The file needs to be
85 * changed like this before LOG_BUF_SHIFT can be set to a high value:
86 * config LOG_BUF_SHIFT
87 * int "Kernel log buffer size (16 => 64KB, 17 => 128KB)"
88 * range 12 30
80 */ 89 */
81 90
82#include <linux/sched.h> 91#include <linux/sched.h>
@@ -124,6 +133,7 @@
124#define BTRFSIC_PRINT_MASK_INITIAL_DATABASE 0x00000400 133#define BTRFSIC_PRINT_MASK_INITIAL_DATABASE 0x00000400
125#define BTRFSIC_PRINT_MASK_NUM_COPIES 0x00000800 134#define BTRFSIC_PRINT_MASK_NUM_COPIES 0x00000800
126#define BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS 0x00001000 135#define BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS 0x00001000
136#define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE 0x00002000
127 137
128struct btrfsic_dev_state; 138struct btrfsic_dev_state;
129struct btrfsic_state; 139struct btrfsic_state;
@@ -323,7 +333,6 @@ static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx);
323static int btrfsic_read_block(struct btrfsic_state *state, 333static int btrfsic_read_block(struct btrfsic_state *state,
324 struct btrfsic_block_data_ctx *block_ctx); 334 struct btrfsic_block_data_ctx *block_ctx);
325static void btrfsic_dump_database(struct btrfsic_state *state); 335static void btrfsic_dump_database(struct btrfsic_state *state);
326static void btrfsic_complete_bio_end_io(struct bio *bio, int err);
327static int btrfsic_test_for_metadata(struct btrfsic_state *state, 336static int btrfsic_test_for_metadata(struct btrfsic_state *state,
328 char **datav, unsigned int num_pages); 337 char **datav, unsigned int num_pages);
329static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state, 338static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
@@ -1677,7 +1686,6 @@ static int btrfsic_read_block(struct btrfsic_state *state,
1677 for (i = 0; i < num_pages;) { 1686 for (i = 0; i < num_pages;) {
1678 struct bio *bio; 1687 struct bio *bio;
1679 unsigned int j; 1688 unsigned int j;
1680 DECLARE_COMPLETION_ONSTACK(complete);
1681 1689
1682 bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i); 1690 bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i);
1683 if (!bio) { 1691 if (!bio) {
@@ -1688,8 +1696,6 @@ static int btrfsic_read_block(struct btrfsic_state *state,
1688 } 1696 }
1689 bio->bi_bdev = block_ctx->dev->bdev; 1697 bio->bi_bdev = block_ctx->dev->bdev;
1690 bio->bi_sector = dev_bytenr >> 9; 1698 bio->bi_sector = dev_bytenr >> 9;
1691 bio->bi_end_io = btrfsic_complete_bio_end_io;
1692 bio->bi_private = &complete;
1693 1699
1694 for (j = i; j < num_pages; j++) { 1700 for (j = i; j < num_pages; j++) {
1695 ret = bio_add_page(bio, block_ctx->pagev[j], 1701 ret = bio_add_page(bio, block_ctx->pagev[j],
@@ -1702,12 +1708,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
1702 "btrfsic: error, failed to add a single page!\n"); 1708 "btrfsic: error, failed to add a single page!\n");
1703 return -1; 1709 return -1;
1704 } 1710 }
1705 submit_bio(READ, bio); 1711 if (submit_bio_wait(READ, bio)) {
1706
1707 /* this will also unplug the queue */
1708 wait_for_completion(&complete);
1709
1710 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
1711 printk(KERN_INFO 1712 printk(KERN_INFO
1712 "btrfsic: read error at logical %llu dev %s!\n", 1713 "btrfsic: read error at logical %llu dev %s!\n",
1713 block_ctx->start, block_ctx->dev->name); 1714 block_ctx->start, block_ctx->dev->name);
@@ -1730,11 +1731,6 @@ static int btrfsic_read_block(struct btrfsic_state *state,
1730 return block_ctx->len; 1731 return block_ctx->len;
1731} 1732}
1732 1733
1733static void btrfsic_complete_bio_end_io(struct bio *bio, int err)
1734{
1735 complete((struct completion *)bio->bi_private);
1736}
1737
1738static void btrfsic_dump_database(struct btrfsic_state *state) 1734static void btrfsic_dump_database(struct btrfsic_state *state)
1739{ 1735{
1740 struct list_head *elem_all; 1736 struct list_head *elem_all;
@@ -2998,14 +2994,12 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh)
2998 return submit_bh(rw, bh); 2994 return submit_bh(rw, bh);
2999} 2995}
3000 2996
3001void btrfsic_submit_bio(int rw, struct bio *bio) 2997static void __btrfsic_submit_bio(int rw, struct bio *bio)
3002{ 2998{
3003 struct btrfsic_dev_state *dev_state; 2999 struct btrfsic_dev_state *dev_state;
3004 3000
3005 if (!btrfsic_is_initialized) { 3001 if (!btrfsic_is_initialized)
3006 submit_bio(rw, bio);
3007 return; 3002 return;
3008 }
3009 3003
3010 mutex_lock(&btrfsic_mutex); 3004 mutex_lock(&btrfsic_mutex);
3011 /* since btrfsic_submit_bio() is also called before 3005 /* since btrfsic_submit_bio() is also called before
@@ -3015,6 +3009,7 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
3015 (rw & WRITE) && NULL != bio->bi_io_vec) { 3009 (rw & WRITE) && NULL != bio->bi_io_vec) {
3016 unsigned int i; 3010 unsigned int i;
3017 u64 dev_bytenr; 3011 u64 dev_bytenr;
3012 u64 cur_bytenr;
3018 int bio_is_patched; 3013 int bio_is_patched;
3019 char **mapped_datav; 3014 char **mapped_datav;
3020 3015
@@ -3033,6 +3028,7 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
3033 GFP_NOFS); 3028 GFP_NOFS);
3034 if (!mapped_datav) 3029 if (!mapped_datav)
3035 goto leave; 3030 goto leave;
3031 cur_bytenr = dev_bytenr;
3036 for (i = 0; i < bio->bi_vcnt; i++) { 3032 for (i = 0; i < bio->bi_vcnt; i++) {
3037 BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_CACHE_SIZE); 3033 BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_CACHE_SIZE);
3038 mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page); 3034 mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page);
@@ -3044,16 +3040,13 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
3044 kfree(mapped_datav); 3040 kfree(mapped_datav);
3045 goto leave; 3041 goto leave;
3046 } 3042 }
3047 if ((BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH | 3043 if (dev_state->state->print_mask &
3048 BTRFSIC_PRINT_MASK_VERBOSE) == 3044 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE)
3049 (dev_state->state->print_mask &
3050 (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
3051 BTRFSIC_PRINT_MASK_VERBOSE)))
3052 printk(KERN_INFO 3045 printk(KERN_INFO
3053 "#%u: page=%p, len=%u, offset=%u\n", 3046 "#%u: bytenr=%llu, len=%u, offset=%u\n",
3054 i, bio->bi_io_vec[i].bv_page, 3047 i, cur_bytenr, bio->bi_io_vec[i].bv_len,
3055 bio->bi_io_vec[i].bv_len,
3056 bio->bi_io_vec[i].bv_offset); 3048 bio->bi_io_vec[i].bv_offset);
3049 cur_bytenr += bio->bi_io_vec[i].bv_len;
3057 } 3050 }
3058 btrfsic_process_written_block(dev_state, dev_bytenr, 3051 btrfsic_process_written_block(dev_state, dev_bytenr,
3059 mapped_datav, bio->bi_vcnt, 3052 mapped_datav, bio->bi_vcnt,
@@ -3097,10 +3090,20 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
3097 } 3090 }
3098leave: 3091leave:
3099 mutex_unlock(&btrfsic_mutex); 3092 mutex_unlock(&btrfsic_mutex);
3093}
3100 3094
3095void btrfsic_submit_bio(int rw, struct bio *bio)
3096{
3097 __btrfsic_submit_bio(rw, bio);
3101 submit_bio(rw, bio); 3098 submit_bio(rw, bio);
3102} 3099}
3103 3100
3101int btrfsic_submit_bio_wait(int rw, struct bio *bio)
3102{
3103 __btrfsic_submit_bio(rw, bio);
3104 return submit_bio_wait(rw, bio);
3105}
3106
3104int btrfsic_mount(struct btrfs_root *root, 3107int btrfsic_mount(struct btrfs_root *root,
3105 struct btrfs_fs_devices *fs_devices, 3108 struct btrfs_fs_devices *fs_devices,
3106 int including_extent_data, u32 print_mask) 3109 int including_extent_data, u32 print_mask)
diff --git a/fs/btrfs/check-integrity.h b/fs/btrfs/check-integrity.h
index 8b59175cc502..13b8566c97ab 100644
--- a/fs/btrfs/check-integrity.h
+++ b/fs/btrfs/check-integrity.h
@@ -22,9 +22,11 @@
22#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 22#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
23int btrfsic_submit_bh(int rw, struct buffer_head *bh); 23int btrfsic_submit_bh(int rw, struct buffer_head *bh);
24void btrfsic_submit_bio(int rw, struct bio *bio); 24void btrfsic_submit_bio(int rw, struct bio *bio);
25int btrfsic_submit_bio_wait(int rw, struct bio *bio);
25#else 26#else
26#define btrfsic_submit_bh submit_bh 27#define btrfsic_submit_bh submit_bh
27#define btrfsic_submit_bio submit_bio 28#define btrfsic_submit_bio submit_bio
29#define btrfsic_submit_bio_wait submit_bio_wait
28#endif 30#endif
29 31
30int btrfsic_mount(struct btrfs_root *root, 32int btrfsic_mount(struct btrfs_root *root,
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index f9aeb2759a64..54ab86127f7a 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3613,9 +3613,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
3613 struct btrfs_ordered_sum *sums); 3613 struct btrfs_ordered_sum *sums);
3614int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, 3614int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
3615 struct bio *bio, u64 file_start, int contig); 3615 struct bio *bio, u64 file_start, int contig);
3616int btrfs_csum_truncate(struct btrfs_trans_handle *trans,
3617 struct btrfs_root *root, struct btrfs_path *path,
3618 u64 isize);
3619int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, 3616int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
3620 struct list_head *list, int search_commit); 3617 struct list_head *list, int search_commit);
3621/* inode.c */ 3618/* inode.c */
@@ -3744,9 +3741,6 @@ void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
3744int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); 3741int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
3745void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, 3742void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
3746 int skip_pinned); 3743 int skip_pinned);
3747int btrfs_replace_extent_cache(struct inode *inode, struct extent_map *replace,
3748 u64 start, u64 end, int skip_pinned,
3749 int modified);
3750extern const struct file_operations btrfs_file_operations; 3744extern const struct file_operations btrfs_file_operations;
3751int __btrfs_drop_extents(struct btrfs_trans_handle *trans, 3745int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
3752 struct btrfs_root *root, struct inode *inode, 3746 struct btrfs_root *root, struct inode *inode,
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 342f9fd411e3..2cfc3dfff64f 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -366,7 +366,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
366 dev_replace->tgtdev = tgt_device; 366 dev_replace->tgtdev = tgt_device;
367 367
368 printk_in_rcu(KERN_INFO 368 printk_in_rcu(KERN_INFO
369 "btrfs: dev_replace from %s (devid %llu) to %s) started\n", 369 "btrfs: dev_replace from %s (devid %llu) to %s started\n",
370 src_device->missing ? "<missing disk>" : 370 src_device->missing ? "<missing disk>" :
371 rcu_str_deref(src_device->name), 371 rcu_str_deref(src_device->name),
372 src_device->devid, 372 src_device->devid,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 4c4ed0bb3da1..8072cfa8a3b1 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3517,7 +3517,6 @@ int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3517int btrfs_commit_super(struct btrfs_root *root) 3517int btrfs_commit_super(struct btrfs_root *root)
3518{ 3518{
3519 struct btrfs_trans_handle *trans; 3519 struct btrfs_trans_handle *trans;
3520 int ret;
3521 3520
3522 mutex_lock(&root->fs_info->cleaner_mutex); 3521 mutex_lock(&root->fs_info->cleaner_mutex);
3523 btrfs_run_delayed_iputs(root); 3522 btrfs_run_delayed_iputs(root);
@@ -3531,25 +3530,7 @@ int btrfs_commit_super(struct btrfs_root *root)
3531 trans = btrfs_join_transaction(root); 3530 trans = btrfs_join_transaction(root);
3532 if (IS_ERR(trans)) 3531 if (IS_ERR(trans))
3533 return PTR_ERR(trans); 3532 return PTR_ERR(trans);
3534 ret = btrfs_commit_transaction(trans, root); 3533 return btrfs_commit_transaction(trans, root);
3535 if (ret)
3536 return ret;
3537 /* run commit again to drop the original snapshot */
3538 trans = btrfs_join_transaction(root);
3539 if (IS_ERR(trans))
3540 return PTR_ERR(trans);
3541 ret = btrfs_commit_transaction(trans, root);
3542 if (ret)
3543 return ret;
3544 ret = btrfs_write_and_wait_transaction(NULL, root);
3545 if (ret) {
3546 btrfs_error(root->fs_info, ret,
3547 "Failed to sync btree inode to disk.");
3548 return ret;
3549 }
3550
3551 ret = write_ctree_super(NULL, root, 0);
3552 return ret;
3553} 3534}
3554 3535
3555int close_ctree(struct btrfs_root *root) 3536int close_ctree(struct btrfs_root *root)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 45d98d01028f..9c01509dd8ab 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -767,20 +767,19 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
767 if (!path) 767 if (!path)
768 return -ENOMEM; 768 return -ENOMEM;
769 769
770 if (metadata) {
771 key.objectid = bytenr;
772 key.type = BTRFS_METADATA_ITEM_KEY;
773 key.offset = offset;
774 } else {
775 key.objectid = bytenr;
776 key.type = BTRFS_EXTENT_ITEM_KEY;
777 key.offset = offset;
778 }
779
780 if (!trans) { 770 if (!trans) {
781 path->skip_locking = 1; 771 path->skip_locking = 1;
782 path->search_commit_root = 1; 772 path->search_commit_root = 1;
783 } 773 }
774
775search_again:
776 key.objectid = bytenr;
777 key.offset = offset;
778 if (metadata)
779 key.type = BTRFS_METADATA_ITEM_KEY;
780 else
781 key.type = BTRFS_EXTENT_ITEM_KEY;
782
784again: 783again:
785 ret = btrfs_search_slot(trans, root->fs_info->extent_root, 784 ret = btrfs_search_slot(trans, root->fs_info->extent_root,
786 &key, path, 0, 0); 785 &key, path, 0, 0);
@@ -788,7 +787,6 @@ again:
788 goto out_free; 787 goto out_free;
789 788
790 if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) { 789 if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
791 metadata = 0;
792 if (path->slots[0]) { 790 if (path->slots[0]) {
793 path->slots[0]--; 791 path->slots[0]--;
794 btrfs_item_key_to_cpu(path->nodes[0], &key, 792 btrfs_item_key_to_cpu(path->nodes[0], &key,
@@ -855,7 +853,7 @@ again:
855 mutex_lock(&head->mutex); 853 mutex_lock(&head->mutex);
856 mutex_unlock(&head->mutex); 854 mutex_unlock(&head->mutex);
857 btrfs_put_delayed_ref(&head->node); 855 btrfs_put_delayed_ref(&head->node);
858 goto again; 856 goto search_again;
859 } 857 }
860 if (head->extent_op && head->extent_op->update_flags) 858 if (head->extent_op && head->extent_op->update_flags)
861 extent_flags |= head->extent_op->flags_to_set; 859 extent_flags |= head->extent_op->flags_to_set;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 856bc2b2192c..ff43802a7c88 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1952,11 +1952,6 @@ static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1952 return err; 1952 return err;
1953} 1953}
1954 1954
1955static void repair_io_failure_callback(struct bio *bio, int err)
1956{
1957 complete(bio->bi_private);
1958}
1959
1960/* 1955/*
1961 * this bypasses the standard btrfs submit functions deliberately, as 1956 * this bypasses the standard btrfs submit functions deliberately, as
1962 * the standard behavior is to write all copies in a raid setup. here we only 1957 * the standard behavior is to write all copies in a raid setup. here we only
@@ -1973,13 +1968,13 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
1973{ 1968{
1974 struct bio *bio; 1969 struct bio *bio;
1975 struct btrfs_device *dev; 1970 struct btrfs_device *dev;
1976 DECLARE_COMPLETION_ONSTACK(compl);
1977 u64 map_length = 0; 1971 u64 map_length = 0;
1978 u64 sector; 1972 u64 sector;
1979 struct btrfs_bio *bbio = NULL; 1973 struct btrfs_bio *bbio = NULL;
1980 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; 1974 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
1981 int ret; 1975 int ret;
1982 1976
1977 ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
1983 BUG_ON(!mirror_num); 1978 BUG_ON(!mirror_num);
1984 1979
1985 /* we can't repair anything in raid56 yet */ 1980 /* we can't repair anything in raid56 yet */
@@ -1989,8 +1984,6 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
1989 bio = btrfs_io_bio_alloc(GFP_NOFS, 1); 1984 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1990 if (!bio) 1985 if (!bio)
1991 return -EIO; 1986 return -EIO;
1992 bio->bi_private = &compl;
1993 bio->bi_end_io = repair_io_failure_callback;
1994 bio->bi_size = 0; 1987 bio->bi_size = 0;
1995 map_length = length; 1988 map_length = length;
1996 1989
@@ -2011,10 +2004,8 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
2011 } 2004 }
2012 bio->bi_bdev = dev->bdev; 2005 bio->bi_bdev = dev->bdev;
2013 bio_add_page(bio, page, length, start - page_offset(page)); 2006 bio_add_page(bio, page, length, start - page_offset(page));
2014 btrfsic_submit_bio(WRITE_SYNC, bio);
2015 wait_for_completion(&compl);
2016 2007
2017 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { 2008 if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
2018 /* try to remap that extent elsewhere? */ 2009 /* try to remap that extent elsewhere? */
2019 bio_put(bio); 2010 bio_put(bio);
2020 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); 2011 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
@@ -2036,6 +2027,9 @@ int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
2036 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len); 2027 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
2037 int ret = 0; 2028 int ret = 0;
2038 2029
2030 if (root->fs_info->sb->s_flags & MS_RDONLY)
2031 return -EROFS;
2032
2039 for (i = 0; i < num_pages; i++) { 2033 for (i = 0; i < num_pages; i++) {
2040 struct page *p = extent_buffer_page(eb, i); 2034 struct page *p = extent_buffer_page(eb, i);
2041 ret = repair_io_failure(root->fs_info, start, PAGE_CACHE_SIZE, 2035 ret = repair_io_failure(root->fs_info, start, PAGE_CACHE_SIZE,
@@ -2057,12 +2051,12 @@ static int clean_io_failure(u64 start, struct page *page)
2057 u64 private; 2051 u64 private;
2058 u64 private_failure; 2052 u64 private_failure;
2059 struct io_failure_record *failrec; 2053 struct io_failure_record *failrec;
2060 struct btrfs_fs_info *fs_info; 2054 struct inode *inode = page->mapping->host;
2055 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2061 struct extent_state *state; 2056 struct extent_state *state;
2062 int num_copies; 2057 int num_copies;
2063 int did_repair = 0; 2058 int did_repair = 0;
2064 int ret; 2059 int ret;
2065 struct inode *inode = page->mapping->host;
2066 2060
2067 private = 0; 2061 private = 0;
2068 ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, 2062 ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
@@ -2085,6 +2079,8 @@ static int clean_io_failure(u64 start, struct page *page)
2085 did_repair = 1; 2079 did_repair = 1;
2086 goto out; 2080 goto out;
2087 } 2081 }
2082 if (fs_info->sb->s_flags & MS_RDONLY)
2083 goto out;
2088 2084
2089 spin_lock(&BTRFS_I(inode)->io_tree.lock); 2085 spin_lock(&BTRFS_I(inode)->io_tree.lock);
2090 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree, 2086 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
@@ -2094,7 +2090,6 @@ static int clean_io_failure(u64 start, struct page *page)
2094 2090
2095 if (state && state->start <= failrec->start && 2091 if (state && state->start <= failrec->start &&
2096 state->end >= failrec->start + failrec->len - 1) { 2092 state->end >= failrec->start + failrec->len - 1) {
2097 fs_info = BTRFS_I(inode)->root->fs_info;
2098 num_copies = btrfs_num_copies(fs_info, failrec->logical, 2093 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2099 failrec->len); 2094 failrec->len);
2100 if (num_copies > 1) { 2095 if (num_copies > 1) {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index da8d2f696ac5..f1a77449d032 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2129,7 +2129,8 @@ static noinline bool record_extent_backrefs(struct btrfs_path *path,
2129 old->extent_offset, fs_info, 2129 old->extent_offset, fs_info,
2130 path, record_one_backref, 2130 path, record_one_backref,
2131 old); 2131 old);
2132 BUG_ON(ret < 0 && ret != -ENOENT); 2132 if (ret < 0 && ret != -ENOENT)
2133 return false;
2133 2134
2134 /* no backref to be processed for this extent */ 2135 /* no backref to be processed for this extent */
2135 if (!old->count) { 2136 if (!old->count) {
@@ -6186,8 +6187,7 @@ insert:
6186 write_unlock(&em_tree->lock); 6187 write_unlock(&em_tree->lock);
6187out: 6188out:
6188 6189
6189 if (em) 6190 trace_btrfs_get_extent(root, em);
6190 trace_btrfs_get_extent(root, em);
6191 6191
6192 if (path) 6192 if (path)
6193 btrfs_free_path(path); 6193 btrfs_free_path(path);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a111622598b0..21da5762b0b1 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2121,7 +2121,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
2121 2121
2122 err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT); 2122 err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
2123 if (err == -EINTR) 2123 if (err == -EINTR)
2124 goto out; 2124 goto out_drop_write;
2125 dentry = lookup_one_len(vol_args->name, parent, namelen); 2125 dentry = lookup_one_len(vol_args->name, parent, namelen);
2126 if (IS_ERR(dentry)) { 2126 if (IS_ERR(dentry)) {
2127 err = PTR_ERR(dentry); 2127 err = PTR_ERR(dentry);
@@ -2284,6 +2284,7 @@ out_dput:
2284 dput(dentry); 2284 dput(dentry);
2285out_unlock_dir: 2285out_unlock_dir:
2286 mutex_unlock(&dir->i_mutex); 2286 mutex_unlock(&dir->i_mutex);
2287out_drop_write:
2287 mnt_drop_write_file(file); 2288 mnt_drop_write_file(file);
2288out: 2289out:
2289 kfree(vol_args); 2290 kfree(vol_args);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 25a8f3812f14..69582d5b69d1 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -638,6 +638,7 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr)
638 WARN_ON(nr < 0); 638 WARN_ON(nr < 0);
639 } 639 }
640 } 640 }
641 list_splice_tail(&splice, &fs_info->ordered_roots);
641 spin_unlock(&fs_info->ordered_root_lock); 642 spin_unlock(&fs_info->ordered_root_lock);
642} 643}
643 644
@@ -803,7 +804,7 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
803 btrfs_put_ordered_extent(ordered); 804 btrfs_put_ordered_extent(ordered);
804 break; 805 break;
805 } 806 }
806 if (ordered->file_offset + ordered->len < start) { 807 if (ordered->file_offset + ordered->len <= start) {
807 btrfs_put_ordered_extent(ordered); 808 btrfs_put_ordered_extent(ordered);
808 break; 809 break;
809 } 810 }
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index ce459a7cb16d..429c73c374b8 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -571,7 +571,9 @@ static int is_cowonly_root(u64 root_objectid)
571 root_objectid == BTRFS_CHUNK_TREE_OBJECTID || 571 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
572 root_objectid == BTRFS_DEV_TREE_OBJECTID || 572 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
573 root_objectid == BTRFS_TREE_LOG_OBJECTID || 573 root_objectid == BTRFS_TREE_LOG_OBJECTID ||
574 root_objectid == BTRFS_CSUM_TREE_OBJECTID) 574 root_objectid == BTRFS_CSUM_TREE_OBJECTID ||
575 root_objectid == BTRFS_UUID_TREE_OBJECTID ||
576 root_objectid == BTRFS_QUOTA_TREE_OBJECTID)
575 return 1; 577 return 1;
576 return 0; 578 return 0;
577} 579}
@@ -1264,10 +1266,10 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
1264} 1266}
1265 1267
1266/* 1268/*
1267 * helper to update/delete the 'address of tree root -> reloc tree' 1269 * helper to delete the 'address of tree root -> reloc tree'
1268 * mapping 1270 * mapping
1269 */ 1271 */
1270static int __update_reloc_root(struct btrfs_root *root, int del) 1272static void __del_reloc_root(struct btrfs_root *root)
1271{ 1273{
1272 struct rb_node *rb_node; 1274 struct rb_node *rb_node;
1273 struct mapping_node *node = NULL; 1275 struct mapping_node *node = NULL;
@@ -1275,7 +1277,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
1275 1277
1276 spin_lock(&rc->reloc_root_tree.lock); 1278 spin_lock(&rc->reloc_root_tree.lock);
1277 rb_node = tree_search(&rc->reloc_root_tree.rb_root, 1279 rb_node = tree_search(&rc->reloc_root_tree.rb_root,
1278 root->commit_root->start); 1280 root->node->start);
1279 if (rb_node) { 1281 if (rb_node) {
1280 node = rb_entry(rb_node, struct mapping_node, rb_node); 1282 node = rb_entry(rb_node, struct mapping_node, rb_node);
1281 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); 1283 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
@@ -1283,23 +1285,45 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
1283 spin_unlock(&rc->reloc_root_tree.lock); 1285 spin_unlock(&rc->reloc_root_tree.lock);
1284 1286
1285 if (!node) 1287 if (!node)
1286 return 0; 1288 return;
1287 BUG_ON((struct btrfs_root *)node->data != root); 1289 BUG_ON((struct btrfs_root *)node->data != root);
1288 1290
1289 if (!del) { 1291 spin_lock(&root->fs_info->trans_lock);
1290 spin_lock(&rc->reloc_root_tree.lock); 1292 list_del_init(&root->root_list);
1291 node->bytenr = root->node->start; 1293 spin_unlock(&root->fs_info->trans_lock);
1292 rb_node = tree_insert(&rc->reloc_root_tree.rb_root, 1294 kfree(node);
1293 node->bytenr, &node->rb_node); 1295}
1294 spin_unlock(&rc->reloc_root_tree.lock); 1296
1295 if (rb_node) 1297/*
1296 backref_tree_panic(rb_node, -EEXIST, node->bytenr); 1298 * helper to update the 'address of tree root -> reloc tree'
1297 } else { 1299 * mapping
1298 spin_lock(&root->fs_info->trans_lock); 1300 */
1299 list_del_init(&root->root_list); 1301static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
1300 spin_unlock(&root->fs_info->trans_lock); 1302{
1301 kfree(node); 1303 struct rb_node *rb_node;
1304 struct mapping_node *node = NULL;
1305 struct reloc_control *rc = root->fs_info->reloc_ctl;
1306
1307 spin_lock(&rc->reloc_root_tree.lock);
1308 rb_node = tree_search(&rc->reloc_root_tree.rb_root,
1309 root->node->start);
1310 if (rb_node) {
1311 node = rb_entry(rb_node, struct mapping_node, rb_node);
1312 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
1302 } 1313 }
1314 spin_unlock(&rc->reloc_root_tree.lock);
1315
1316 if (!node)
1317 return 0;
1318 BUG_ON((struct btrfs_root *)node->data != root);
1319
1320 spin_lock(&rc->reloc_root_tree.lock);
1321 node->bytenr = new_bytenr;
1322 rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
1323 node->bytenr, &node->rb_node);
1324 spin_unlock(&rc->reloc_root_tree.lock);
1325 if (rb_node)
1326 backref_tree_panic(rb_node, -EEXIST, node->bytenr);
1303 return 0; 1327 return 0;
1304} 1328}
1305 1329
@@ -1420,7 +1444,6 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
1420{ 1444{
1421 struct btrfs_root *reloc_root; 1445 struct btrfs_root *reloc_root;
1422 struct btrfs_root_item *root_item; 1446 struct btrfs_root_item *root_item;
1423 int del = 0;
1424 int ret; 1447 int ret;
1425 1448
1426 if (!root->reloc_root) 1449 if (!root->reloc_root)
@@ -1432,11 +1455,9 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
1432 if (root->fs_info->reloc_ctl->merge_reloc_tree && 1455 if (root->fs_info->reloc_ctl->merge_reloc_tree &&
1433 btrfs_root_refs(root_item) == 0) { 1456 btrfs_root_refs(root_item) == 0) {
1434 root->reloc_root = NULL; 1457 root->reloc_root = NULL;
1435 del = 1; 1458 __del_reloc_root(reloc_root);
1436 } 1459 }
1437 1460
1438 __update_reloc_root(reloc_root, del);
1439
1440 if (reloc_root->commit_root != reloc_root->node) { 1461 if (reloc_root->commit_root != reloc_root->node) {
1441 btrfs_set_root_node(root_item, reloc_root->node); 1462 btrfs_set_root_node(root_item, reloc_root->node);
1442 free_extent_buffer(reloc_root->commit_root); 1463 free_extent_buffer(reloc_root->commit_root);
@@ -2287,7 +2308,7 @@ void free_reloc_roots(struct list_head *list)
2287 while (!list_empty(list)) { 2308 while (!list_empty(list)) {
2288 reloc_root = list_entry(list->next, struct btrfs_root, 2309 reloc_root = list_entry(list->next, struct btrfs_root,
2289 root_list); 2310 root_list);
2290 __update_reloc_root(reloc_root, 1); 2311 __del_reloc_root(reloc_root);
2291 free_extent_buffer(reloc_root->node); 2312 free_extent_buffer(reloc_root->node);
2292 free_extent_buffer(reloc_root->commit_root); 2313 free_extent_buffer(reloc_root->commit_root);
2293 kfree(reloc_root); 2314 kfree(reloc_root);
@@ -2332,7 +2353,7 @@ again:
2332 2353
2333 ret = merge_reloc_root(rc, root); 2354 ret = merge_reloc_root(rc, root);
2334 if (ret) { 2355 if (ret) {
2335 __update_reloc_root(reloc_root, 1); 2356 __del_reloc_root(reloc_root);
2336 free_extent_buffer(reloc_root->node); 2357 free_extent_buffer(reloc_root->node);
2337 free_extent_buffer(reloc_root->commit_root); 2358 free_extent_buffer(reloc_root->commit_root);
2338 kfree(reloc_root); 2359 kfree(reloc_root);
@@ -2388,6 +2409,13 @@ out:
2388 btrfs_std_error(root->fs_info, ret); 2409 btrfs_std_error(root->fs_info, ret);
2389 if (!list_empty(&reloc_roots)) 2410 if (!list_empty(&reloc_roots))
2390 free_reloc_roots(&reloc_roots); 2411 free_reloc_roots(&reloc_roots);
2412
2413 /* new reloc root may be added */
2414 mutex_lock(&root->fs_info->reloc_mutex);
2415 list_splice_init(&rc->reloc_roots, &reloc_roots);
2416 mutex_unlock(&root->fs_info->reloc_mutex);
2417 if (!list_empty(&reloc_roots))
2418 free_reloc_roots(&reloc_roots);
2391 } 2419 }
2392 2420
2393 BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); 2421 BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
@@ -4522,6 +4550,11 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
4522 BUG_ON(rc->stage == UPDATE_DATA_PTRS && 4550 BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
4523 root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID); 4551 root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
4524 4552
4553 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
4554 if (buf == root->node)
4555 __update_reloc_root(root, cow->start);
4556 }
4557
4525 level = btrfs_header_level(buf); 4558 level = btrfs_header_level(buf);
4526 if (btrfs_header_generation(buf) <= 4559 if (btrfs_header_generation(buf) <=
4527 btrfs_root_last_snapshot(&root->root_item)) 4560 btrfs_root_last_snapshot(&root->root_item))
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 2544805544f0..1fd3f33c330a 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -208,7 +208,6 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
208 int is_metadata, int have_csum, 208 int is_metadata, int have_csum,
209 const u8 *csum, u64 generation, 209 const u8 *csum, u64 generation,
210 u16 csum_size); 210 u16 csum_size);
211static void scrub_complete_bio_end_io(struct bio *bio, int err);
212static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, 211static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
213 struct scrub_block *sblock_good, 212 struct scrub_block *sblock_good,
214 int force_write); 213 int force_write);
@@ -938,8 +937,10 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
938 BTRFS_DEV_STAT_CORRUPTION_ERRS); 937 BTRFS_DEV_STAT_CORRUPTION_ERRS);
939 } 938 }
940 939
941 if (sctx->readonly && !sctx->is_dev_replace) 940 if (sctx->readonly) {
942 goto did_not_correct_error; 941 ASSERT(!sctx->is_dev_replace);
942 goto out;
943 }
943 944
944 if (!is_metadata && !have_csum) { 945 if (!is_metadata && !have_csum) {
945 struct scrub_fixup_nodatasum *fixup_nodatasum; 946 struct scrub_fixup_nodatasum *fixup_nodatasum;
@@ -1292,7 +1293,6 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1292 for (page_num = 0; page_num < sblock->page_count; page_num++) { 1293 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1293 struct bio *bio; 1294 struct bio *bio;
1294 struct scrub_page *page = sblock->pagev[page_num]; 1295 struct scrub_page *page = sblock->pagev[page_num];
1295 DECLARE_COMPLETION_ONSTACK(complete);
1296 1296
1297 if (page->dev->bdev == NULL) { 1297 if (page->dev->bdev == NULL) {
1298 page->io_error = 1; 1298 page->io_error = 1;
@@ -1309,18 +1309,11 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1309 } 1309 }
1310 bio->bi_bdev = page->dev->bdev; 1310 bio->bi_bdev = page->dev->bdev;
1311 bio->bi_sector = page->physical >> 9; 1311 bio->bi_sector = page->physical >> 9;
1312 bio->bi_end_io = scrub_complete_bio_end_io;
1313 bio->bi_private = &complete;
1314 1312
1315 bio_add_page(bio, page->page, PAGE_SIZE, 0); 1313 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1316 btrfsic_submit_bio(READ, bio); 1314 if (btrfsic_submit_bio_wait(READ, bio))
1317
1318 /* this will also unplug the queue */
1319 wait_for_completion(&complete);
1320
1321 page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
1322 if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1323 sblock->no_io_error_seen = 0; 1315 sblock->no_io_error_seen = 0;
1316
1324 bio_put(bio); 1317 bio_put(bio);
1325 } 1318 }
1326 1319
@@ -1389,11 +1382,6 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1389 sblock->checksum_error = 1; 1382 sblock->checksum_error = 1;
1390} 1383}
1391 1384
1392static void scrub_complete_bio_end_io(struct bio *bio, int err)
1393{
1394 complete((struct completion *)bio->bi_private);
1395}
1396
1397static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, 1385static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1398 struct scrub_block *sblock_good, 1386 struct scrub_block *sblock_good,
1399 int force_write) 1387 int force_write)
@@ -1428,7 +1416,6 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1428 sblock_bad->checksum_error || page_bad->io_error) { 1416 sblock_bad->checksum_error || page_bad->io_error) {
1429 struct bio *bio; 1417 struct bio *bio;
1430 int ret; 1418 int ret;
1431 DECLARE_COMPLETION_ONSTACK(complete);
1432 1419
1433 if (!page_bad->dev->bdev) { 1420 if (!page_bad->dev->bdev) {
1434 printk_ratelimited(KERN_WARNING 1421 printk_ratelimited(KERN_WARNING
@@ -1441,19 +1428,14 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1441 return -EIO; 1428 return -EIO;
1442 bio->bi_bdev = page_bad->dev->bdev; 1429 bio->bi_bdev = page_bad->dev->bdev;
1443 bio->bi_sector = page_bad->physical >> 9; 1430 bio->bi_sector = page_bad->physical >> 9;
1444 bio->bi_end_io = scrub_complete_bio_end_io;
1445 bio->bi_private = &complete;
1446 1431
1447 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0); 1432 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1448 if (PAGE_SIZE != ret) { 1433 if (PAGE_SIZE != ret) {
1449 bio_put(bio); 1434 bio_put(bio);
1450 return -EIO; 1435 return -EIO;
1451 } 1436 }
1452 btrfsic_submit_bio(WRITE, bio);
1453 1437
1454 /* this will also unplug the queue */ 1438 if (btrfsic_submit_bio_wait(WRITE, bio)) {
1455 wait_for_completion(&complete);
1456 if (!bio_flagged(bio, BIO_UPTODATE)) {
1457 btrfs_dev_stat_inc_and_print(page_bad->dev, 1439 btrfs_dev_stat_inc_and_print(page_bad->dev,
1458 BTRFS_DEV_STAT_WRITE_ERRS); 1440 BTRFS_DEV_STAT_WRITE_ERRS);
1459 btrfs_dev_replace_stats_inc( 1441 btrfs_dev_replace_stats_inc(
@@ -3373,7 +3355,6 @@ static int write_page_nocow(struct scrub_ctx *sctx,
3373 struct bio *bio; 3355 struct bio *bio;
3374 struct btrfs_device *dev; 3356 struct btrfs_device *dev;
3375 int ret; 3357 int ret;
3376 DECLARE_COMPLETION_ONSTACK(compl);
3377 3358
3378 dev = sctx->wr_ctx.tgtdev; 3359 dev = sctx->wr_ctx.tgtdev;
3379 if (!dev) 3360 if (!dev)
@@ -3390,8 +3371,6 @@ static int write_page_nocow(struct scrub_ctx *sctx,
3390 spin_unlock(&sctx->stat_lock); 3371 spin_unlock(&sctx->stat_lock);
3391 return -ENOMEM; 3372 return -ENOMEM;
3392 } 3373 }
3393 bio->bi_private = &compl;
3394 bio->bi_end_io = scrub_complete_bio_end_io;
3395 bio->bi_size = 0; 3374 bio->bi_size = 0;
3396 bio->bi_sector = physical_for_dev_replace >> 9; 3375 bio->bi_sector = physical_for_dev_replace >> 9;
3397 bio->bi_bdev = dev->bdev; 3376 bio->bi_bdev = dev->bdev;
@@ -3402,10 +3381,8 @@ leave_with_eio:
3402 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); 3381 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
3403 return -EIO; 3382 return -EIO;
3404 } 3383 }
3405 btrfsic_submit_bio(WRITE_SYNC, bio);
3406 wait_for_completion(&compl);
3407 3384
3408 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) 3385 if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
3409 goto leave_with_eio; 3386 goto leave_with_eio;
3410 3387
3411 bio_put(bio); 3388 bio_put(bio);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 6837fe87f3a6..945d1db98f26 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4723,8 +4723,8 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
4723 } 4723 }
4724 4724
4725 if (!access_ok(VERIFY_READ, arg->clone_sources, 4725 if (!access_ok(VERIFY_READ, arg->clone_sources,
4726 sizeof(*arg->clone_sources * 4726 sizeof(*arg->clone_sources) *
4727 arg->clone_sources_count))) { 4727 arg->clone_sources_count)) {
4728 ret = -EFAULT; 4728 ret = -EFAULT;
4729 goto out; 4729 goto out;
4730 } 4730 }
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 2d8ac1bf0cf9..d71a11d13dfa 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -432,7 +432,6 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
432 } else { 432 } else {
433 printk(KERN_INFO "btrfs: setting nodatacow\n"); 433 printk(KERN_INFO "btrfs: setting nodatacow\n");
434 } 434 }
435 info->compress_type = BTRFS_COMPRESS_NONE;
436 btrfs_clear_opt(info->mount_opt, COMPRESS); 435 btrfs_clear_opt(info->mount_opt, COMPRESS);
437 btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); 436 btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
438 btrfs_set_opt(info->mount_opt, NODATACOW); 437 btrfs_set_opt(info->mount_opt, NODATACOW);
@@ -461,7 +460,6 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
461 btrfs_set_fs_incompat(info, COMPRESS_LZO); 460 btrfs_set_fs_incompat(info, COMPRESS_LZO);
462 } else if (strncmp(args[0].from, "no", 2) == 0) { 461 } else if (strncmp(args[0].from, "no", 2) == 0) {
463 compress_type = "no"; 462 compress_type = "no";
464 info->compress_type = BTRFS_COMPRESS_NONE;
465 btrfs_clear_opt(info->mount_opt, COMPRESS); 463 btrfs_clear_opt(info->mount_opt, COMPRESS);
466 btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); 464 btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
467 compress_force = false; 465 compress_force = false;
@@ -474,9 +472,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
474 btrfs_set_opt(info->mount_opt, FORCE_COMPRESS); 472 btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
475 pr_info("btrfs: force %s compression\n", 473 pr_info("btrfs: force %s compression\n",
476 compress_type); 474 compress_type);
477 } else 475 } else if (btrfs_test_opt(root, COMPRESS)) {
478 pr_info("btrfs: use %s compression\n", 476 pr_info("btrfs: use %s compression\n",
479 compress_type); 477 compress_type);
478 }
480 break; 479 break;
481 case Opt_ssd: 480 case Opt_ssd:
482 printk(KERN_INFO "btrfs: use ssd allocation scheme\n"); 481 printk(KERN_INFO "btrfs: use ssd allocation scheme\n");
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 57c16b46afbd..c6a872a8a468 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1480,7 +1480,7 @@ static void do_async_commit(struct work_struct *work)
1480 * We've got freeze protection passed with the transaction. 1480 * We've got freeze protection passed with the transaction.
1481 * Tell lockdep about it. 1481 * Tell lockdep about it.
1482 */ 1482 */
1483 if (ac->newtrans->type < TRANS_JOIN_NOLOCK) 1483 if (ac->newtrans->type & __TRANS_FREEZABLE)
1484 rwsem_acquire_read( 1484 rwsem_acquire_read(
1485 &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1], 1485 &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
1486 0, 1, _THIS_IP_); 1486 0, 1, _THIS_IP_);
@@ -1521,7 +1521,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1521 * Tell lockdep we've released the freeze rwsem, since the 1521 * Tell lockdep we've released the freeze rwsem, since the
1522 * async commit thread will be the one to unlock it. 1522 * async commit thread will be the one to unlock it.
1523 */ 1523 */
1524 if (trans->type < TRANS_JOIN_NOLOCK) 1524 if (ac->newtrans->type & __TRANS_FREEZABLE)
1525 rwsem_release( 1525 rwsem_release(
1526 &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1], 1526 &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
1527 1, _THIS_IP_); 1527 1, _THIS_IP_);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 744553c83fe2..9f7fc51ca334 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3697,7 +3697,8 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
3697 ret = btrfs_truncate_inode_items(trans, log, 3697 ret = btrfs_truncate_inode_items(trans, log,
3698 inode, 0, 0); 3698 inode, 0, 0);
3699 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING, 3699 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
3700 &BTRFS_I(inode)->runtime_flags)) { 3700 &BTRFS_I(inode)->runtime_flags) ||
3701 inode_only == LOG_INODE_EXISTS) {
3701 if (inode_only == LOG_INODE_ALL) 3702 if (inode_only == LOG_INODE_ALL)
3702 fast_search = true; 3703 fast_search = true;
3703 max_key.type = BTRFS_XATTR_ITEM_KEY; 3704 max_key.type = BTRFS_XATTR_ITEM_KEY;
@@ -3801,7 +3802,7 @@ log_extents:
3801 err = ret; 3802 err = ret;
3802 goto out_unlock; 3803 goto out_unlock;
3803 } 3804 }
3804 } else { 3805 } else if (inode_only == LOG_INODE_ALL) {
3805 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree; 3806 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
3806 struct extent_map *em, *n; 3807 struct extent_map *em, *n;
3807 3808
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0db637097862..92303f42baaa 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -5394,7 +5394,7 @@ static int bio_size_ok(struct block_device *bdev, struct bio *bio,
5394{ 5394{
5395 struct bio_vec *prev; 5395 struct bio_vec *prev;
5396 struct request_queue *q = bdev_get_queue(bdev); 5396 struct request_queue *q = bdev_get_queue(bdev);
5397 unsigned short max_sectors = queue_max_sectors(q); 5397 unsigned int max_sectors = queue_max_sectors(q);
5398 struct bvec_merge_data bvm = { 5398 struct bvec_merge_data bvm = {
5399 .bi_bdev = bdev, 5399 .bi_bdev = bdev,
5400 .bi_sector = sector, 5400 .bi_sector = sector,
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 6df8bd481425..1e561c059539 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -216,7 +216,7 @@ static int readpage_nounlock(struct file *filp, struct page *page)
216 } 216 }
217 SetPageUptodate(page); 217 SetPageUptodate(page);
218 218
219 if (err == 0) 219 if (err >= 0)
220 ceph_readpage_to_fscache(inode, page); 220 ceph_readpage_to_fscache(inode, page);
221 221
222out: 222out:
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index 7db2e6ca4b8f..8c44fdd4e1c3 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -324,6 +324,9 @@ void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
324{ 324{
325 struct ceph_inode_info *ci = ceph_inode(inode); 325 struct ceph_inode_info *ci = ceph_inode(inode);
326 326
327 if (!PageFsCache(page))
328 return;
329
327 fscache_wait_on_page_write(ci->fscache, page); 330 fscache_wait_on_page_write(ci->fscache, page);
328 fscache_uncache_page(ci->fscache, page); 331 fscache_uncache_page(ci->fscache, page);
329} 332}
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 13976c33332e..3c0a4bd74996 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -897,7 +897,7 @@ static int __ceph_is_any_caps(struct ceph_inode_info *ci)
897 * caller should hold i_ceph_lock. 897 * caller should hold i_ceph_lock.
898 * caller will not hold session s_mutex if called from destroy_inode. 898 * caller will not hold session s_mutex if called from destroy_inode.
899 */ 899 */
900void __ceph_remove_cap(struct ceph_cap *cap) 900void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
901{ 901{
902 struct ceph_mds_session *session = cap->session; 902 struct ceph_mds_session *session = cap->session;
903 struct ceph_inode_info *ci = cap->ci; 903 struct ceph_inode_info *ci = cap->ci;
@@ -909,6 +909,16 @@ void __ceph_remove_cap(struct ceph_cap *cap)
909 909
910 /* remove from session list */ 910 /* remove from session list */
911 spin_lock(&session->s_cap_lock); 911 spin_lock(&session->s_cap_lock);
912 /*
913 * s_cap_reconnect is protected by s_cap_lock. no one changes
914 * s_cap_gen while session is in the reconnect state.
915 */
916 if (queue_release &&
917 (!session->s_cap_reconnect ||
918 cap->cap_gen == session->s_cap_gen))
919 __queue_cap_release(session, ci->i_vino.ino, cap->cap_id,
920 cap->mseq, cap->issue_seq);
921
912 if (session->s_cap_iterator == cap) { 922 if (session->s_cap_iterator == cap) {
913 /* not yet, we are iterating over this very cap */ 923 /* not yet, we are iterating over this very cap */
914 dout("__ceph_remove_cap delaying %p removal from session %p\n", 924 dout("__ceph_remove_cap delaying %p removal from session %p\n",
@@ -1023,7 +1033,6 @@ void __queue_cap_release(struct ceph_mds_session *session,
1023 struct ceph_mds_cap_release *head; 1033 struct ceph_mds_cap_release *head;
1024 struct ceph_mds_cap_item *item; 1034 struct ceph_mds_cap_item *item;
1025 1035
1026 spin_lock(&session->s_cap_lock);
1027 BUG_ON(!session->s_num_cap_releases); 1036 BUG_ON(!session->s_num_cap_releases);
1028 msg = list_first_entry(&session->s_cap_releases, 1037 msg = list_first_entry(&session->s_cap_releases,
1029 struct ceph_msg, list_head); 1038 struct ceph_msg, list_head);
@@ -1052,7 +1061,6 @@ void __queue_cap_release(struct ceph_mds_session *session,
1052 (int)CEPH_CAPS_PER_RELEASE, 1061 (int)CEPH_CAPS_PER_RELEASE,
1053 (int)msg->front.iov_len); 1062 (int)msg->front.iov_len);
1054 } 1063 }
1055 spin_unlock(&session->s_cap_lock);
1056} 1064}
1057 1065
1058/* 1066/*
@@ -1067,12 +1075,8 @@ void ceph_queue_caps_release(struct inode *inode)
1067 p = rb_first(&ci->i_caps); 1075 p = rb_first(&ci->i_caps);
1068 while (p) { 1076 while (p) {
1069 struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node); 1077 struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
1070 struct ceph_mds_session *session = cap->session;
1071
1072 __queue_cap_release(session, ceph_ino(inode), cap->cap_id,
1073 cap->mseq, cap->issue_seq);
1074 p = rb_next(p); 1078 p = rb_next(p);
1075 __ceph_remove_cap(cap); 1079 __ceph_remove_cap(cap, true);
1076 } 1080 }
1077} 1081}
1078 1082
@@ -2791,7 +2795,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
2791 } 2795 }
2792 spin_unlock(&mdsc->cap_dirty_lock); 2796 spin_unlock(&mdsc->cap_dirty_lock);
2793 } 2797 }
2794 __ceph_remove_cap(cap); 2798 __ceph_remove_cap(cap, false);
2795 } 2799 }
2796 /* else, we already released it */ 2800 /* else, we already released it */
2797 2801
@@ -2931,9 +2935,12 @@ void ceph_handle_caps(struct ceph_mds_session *session,
2931 if (!inode) { 2935 if (!inode) {
2932 dout(" i don't have ino %llx\n", vino.ino); 2936 dout(" i don't have ino %llx\n", vino.ino);
2933 2937
2934 if (op == CEPH_CAP_OP_IMPORT) 2938 if (op == CEPH_CAP_OP_IMPORT) {
2939 spin_lock(&session->s_cap_lock);
2935 __queue_cap_release(session, vino.ino, cap_id, 2940 __queue_cap_release(session, vino.ino, cap_id,
2936 mseq, seq); 2941 mseq, seq);
2942 spin_unlock(&session->s_cap_lock);
2943 }
2937 goto flush_cap_releases; 2944 goto flush_cap_releases;
2938 } 2945 }
2939 2946
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 868b61d56cac..2a0bcaeb189a 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -352,8 +352,18 @@ more:
352 } 352 }
353 353
354 /* note next offset and last dentry name */ 354 /* note next offset and last dentry name */
355 rinfo = &req->r_reply_info;
356 if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
357 frag = le32_to_cpu(rinfo->dir_dir->frag);
358 if (ceph_frag_is_leftmost(frag))
359 fi->next_offset = 2;
360 else
361 fi->next_offset = 0;
362 off = fi->next_offset;
363 }
355 fi->offset = fi->next_offset; 364 fi->offset = fi->next_offset;
356 fi->last_readdir = req; 365 fi->last_readdir = req;
366 fi->frag = frag;
357 367
358 if (req->r_reply_info.dir_end) { 368 if (req->r_reply_info.dir_end) {
359 kfree(fi->last_name); 369 kfree(fi->last_name);
@@ -363,7 +373,6 @@ more:
363 else 373 else
364 fi->next_offset = 0; 374 fi->next_offset = 0;
365 } else { 375 } else {
366 rinfo = &req->r_reply_info;
367 err = note_last_dentry(fi, 376 err = note_last_dentry(fi,
368 rinfo->dir_dname[rinfo->dir_nr-1], 377 rinfo->dir_dname[rinfo->dir_nr-1],
369 rinfo->dir_dname_len[rinfo->dir_nr-1]); 378 rinfo->dir_dname_len[rinfo->dir_nr-1]);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 8549a48115f7..9a8e396aed89 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -577,6 +577,8 @@ static int fill_inode(struct inode *inode,
577 int issued = 0, implemented; 577 int issued = 0, implemented;
578 struct timespec mtime, atime, ctime; 578 struct timespec mtime, atime, ctime;
579 u32 nsplits; 579 u32 nsplits;
580 struct ceph_inode_frag *frag;
581 struct rb_node *rb_node;
580 struct ceph_buffer *xattr_blob = NULL; 582 struct ceph_buffer *xattr_blob = NULL;
581 int err = 0; 583 int err = 0;
582 int queue_trunc = 0; 584 int queue_trunc = 0;
@@ -751,15 +753,38 @@ no_change:
751 /* FIXME: move me up, if/when version reflects fragtree changes */ 753 /* FIXME: move me up, if/when version reflects fragtree changes */
752 nsplits = le32_to_cpu(info->fragtree.nsplits); 754 nsplits = le32_to_cpu(info->fragtree.nsplits);
753 mutex_lock(&ci->i_fragtree_mutex); 755 mutex_lock(&ci->i_fragtree_mutex);
756 rb_node = rb_first(&ci->i_fragtree);
754 for (i = 0; i < nsplits; i++) { 757 for (i = 0; i < nsplits; i++) {
755 u32 id = le32_to_cpu(info->fragtree.splits[i].frag); 758 u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
756 struct ceph_inode_frag *frag = __get_or_create_frag(ci, id); 759 frag = NULL;
757 760 while (rb_node) {
758 if (IS_ERR(frag)) 761 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
759 continue; 762 if (ceph_frag_compare(frag->frag, id) >= 0) {
763 if (frag->frag != id)
764 frag = NULL;
765 else
766 rb_node = rb_next(rb_node);
767 break;
768 }
769 rb_node = rb_next(rb_node);
770 rb_erase(&frag->node, &ci->i_fragtree);
771 kfree(frag);
772 frag = NULL;
773 }
774 if (!frag) {
775 frag = __get_or_create_frag(ci, id);
776 if (IS_ERR(frag))
777 continue;
778 }
760 frag->split_by = le32_to_cpu(info->fragtree.splits[i].by); 779 frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
761 dout(" frag %x split by %d\n", frag->frag, frag->split_by); 780 dout(" frag %x split by %d\n", frag->frag, frag->split_by);
762 } 781 }
782 while (rb_node) {
783 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
784 rb_node = rb_next(rb_node);
785 rb_erase(&frag->node, &ci->i_fragtree);
786 kfree(frag);
787 }
763 mutex_unlock(&ci->i_fragtree_mutex); 788 mutex_unlock(&ci->i_fragtree_mutex);
764 789
765 /* were we issued a capability? */ 790 /* were we issued a capability? */
@@ -1250,8 +1275,20 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1250 int err = 0, i; 1275 int err = 0, i;
1251 struct inode *snapdir = NULL; 1276 struct inode *snapdir = NULL;
1252 struct ceph_mds_request_head *rhead = req->r_request->front.iov_base; 1277 struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
1253 u64 frag = le32_to_cpu(rhead->args.readdir.frag);
1254 struct ceph_dentry_info *di; 1278 struct ceph_dentry_info *di;
1279 u64 r_readdir_offset = req->r_readdir_offset;
1280 u32 frag = le32_to_cpu(rhead->args.readdir.frag);
1281
1282 if (rinfo->dir_dir &&
1283 le32_to_cpu(rinfo->dir_dir->frag) != frag) {
1284 dout("readdir_prepopulate got new frag %x -> %x\n",
1285 frag, le32_to_cpu(rinfo->dir_dir->frag));
1286 frag = le32_to_cpu(rinfo->dir_dir->frag);
1287 if (ceph_frag_is_leftmost(frag))
1288 r_readdir_offset = 2;
1289 else
1290 r_readdir_offset = 0;
1291 }
1255 1292
1256 if (req->r_aborted) 1293 if (req->r_aborted)
1257 return readdir_prepopulate_inodes_only(req, session); 1294 return readdir_prepopulate_inodes_only(req, session);
@@ -1315,7 +1352,7 @@ retry_lookup:
1315 } 1352 }
1316 1353
1317 di = dn->d_fsdata; 1354 di = dn->d_fsdata;
1318 di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset); 1355 di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
1319 1356
1320 /* inode */ 1357 /* inode */
1321 if (dn->d_inode) { 1358 if (dn->d_inode) {
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index b7bda5d9611d..d90861f45210 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -43,6 +43,7 @@
43 */ 43 */
44 44
45struct ceph_reconnect_state { 45struct ceph_reconnect_state {
46 int nr_caps;
46 struct ceph_pagelist *pagelist; 47 struct ceph_pagelist *pagelist;
47 bool flock; 48 bool flock;
48}; 49};
@@ -443,6 +444,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
443 INIT_LIST_HEAD(&s->s_waiting); 444 INIT_LIST_HEAD(&s->s_waiting);
444 INIT_LIST_HEAD(&s->s_unsafe); 445 INIT_LIST_HEAD(&s->s_unsafe);
445 s->s_num_cap_releases = 0; 446 s->s_num_cap_releases = 0;
447 s->s_cap_reconnect = 0;
446 s->s_cap_iterator = NULL; 448 s->s_cap_iterator = NULL;
447 INIT_LIST_HEAD(&s->s_cap_releases); 449 INIT_LIST_HEAD(&s->s_cap_releases);
448 INIT_LIST_HEAD(&s->s_cap_releases_done); 450 INIT_LIST_HEAD(&s->s_cap_releases_done);
@@ -642,6 +644,8 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
642 req->r_unsafe_dir = NULL; 644 req->r_unsafe_dir = NULL;
643 } 645 }
644 646
647 complete_all(&req->r_safe_completion);
648
645 ceph_mdsc_put_request(req); 649 ceph_mdsc_put_request(req);
646} 650}
647 651
@@ -986,7 +990,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
986 dout("removing cap %p, ci is %p, inode is %p\n", 990 dout("removing cap %p, ci is %p, inode is %p\n",
987 cap, ci, &ci->vfs_inode); 991 cap, ci, &ci->vfs_inode);
988 spin_lock(&ci->i_ceph_lock); 992 spin_lock(&ci->i_ceph_lock);
989 __ceph_remove_cap(cap); 993 __ceph_remove_cap(cap, false);
990 if (!__ceph_is_any_real_caps(ci)) { 994 if (!__ceph_is_any_real_caps(ci)) {
991 struct ceph_mds_client *mdsc = 995 struct ceph_mds_client *mdsc =
992 ceph_sb_to_client(inode->i_sb)->mdsc; 996 ceph_sb_to_client(inode->i_sb)->mdsc;
@@ -1231,9 +1235,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1231 session->s_trim_caps--; 1235 session->s_trim_caps--;
1232 if (oissued) { 1236 if (oissued) {
1233 /* we aren't the only cap.. just remove us */ 1237 /* we aren't the only cap.. just remove us */
1234 __queue_cap_release(session, ceph_ino(inode), cap->cap_id, 1238 __ceph_remove_cap(cap, true);
1235 cap->mseq, cap->issue_seq);
1236 __ceph_remove_cap(cap);
1237 } else { 1239 } else {
1238 /* try to drop referring dentries */ 1240 /* try to drop referring dentries */
1239 spin_unlock(&ci->i_ceph_lock); 1241 spin_unlock(&ci->i_ceph_lock);
@@ -1416,7 +1418,6 @@ static void discard_cap_releases(struct ceph_mds_client *mdsc,
1416 unsigned num; 1418 unsigned num;
1417 1419
1418 dout("discard_cap_releases mds%d\n", session->s_mds); 1420 dout("discard_cap_releases mds%d\n", session->s_mds);
1419 spin_lock(&session->s_cap_lock);
1420 1421
1421 /* zero out the in-progress message */ 1422 /* zero out the in-progress message */
1422 msg = list_first_entry(&session->s_cap_releases, 1423 msg = list_first_entry(&session->s_cap_releases,
@@ -1443,8 +1444,6 @@ static void discard_cap_releases(struct ceph_mds_client *mdsc,
1443 msg->front.iov_len = sizeof(*head); 1444 msg->front.iov_len = sizeof(*head);
1444 list_add(&msg->list_head, &session->s_cap_releases); 1445 list_add(&msg->list_head, &session->s_cap_releases);
1445 } 1446 }
1446
1447 spin_unlock(&session->s_cap_lock);
1448} 1447}
1449 1448
1450/* 1449/*
@@ -1875,8 +1874,11 @@ static int __do_request(struct ceph_mds_client *mdsc,
1875 int mds = -1; 1874 int mds = -1;
1876 int err = -EAGAIN; 1875 int err = -EAGAIN;
1877 1876
1878 if (req->r_err || req->r_got_result) 1877 if (req->r_err || req->r_got_result) {
1878 if (req->r_aborted)
1879 __unregister_request(mdsc, req);
1879 goto out; 1880 goto out;
1881 }
1880 1882
1881 if (req->r_timeout && 1883 if (req->r_timeout &&
1882 time_after_eq(jiffies, req->r_started + req->r_timeout)) { 1884 time_after_eq(jiffies, req->r_started + req->r_timeout)) {
@@ -2186,7 +2188,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2186 if (head->safe) { 2188 if (head->safe) {
2187 req->r_got_safe = true; 2189 req->r_got_safe = true;
2188 __unregister_request(mdsc, req); 2190 __unregister_request(mdsc, req);
2189 complete_all(&req->r_safe_completion);
2190 2191
2191 if (req->r_got_unsafe) { 2192 if (req->r_got_unsafe) {
2192 /* 2193 /*
@@ -2238,8 +2239,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2238 err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session); 2239 err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
2239 if (err == 0) { 2240 if (err == 0) {
2240 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR || 2241 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
2241 req->r_op == CEPH_MDS_OP_LSSNAP) && 2242 req->r_op == CEPH_MDS_OP_LSSNAP))
2242 rinfo->dir_nr)
2243 ceph_readdir_prepopulate(req, req->r_session); 2243 ceph_readdir_prepopulate(req, req->r_session);
2244 ceph_unreserve_caps(mdsc, &req->r_caps_reservation); 2244 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
2245 } 2245 }
@@ -2490,6 +2490,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2490 cap->seq = 0; /* reset cap seq */ 2490 cap->seq = 0; /* reset cap seq */
2491 cap->issue_seq = 0; /* and issue_seq */ 2491 cap->issue_seq = 0; /* and issue_seq */
2492 cap->mseq = 0; /* and migrate_seq */ 2492 cap->mseq = 0; /* and migrate_seq */
2493 cap->cap_gen = cap->session->s_cap_gen;
2493 2494
2494 if (recon_state->flock) { 2495 if (recon_state->flock) {
2495 rec.v2.cap_id = cpu_to_le64(cap->cap_id); 2496 rec.v2.cap_id = cpu_to_le64(cap->cap_id);
@@ -2552,6 +2553,8 @@ encode_again:
2552 } else { 2553 } else {
2553 err = ceph_pagelist_append(pagelist, &rec, reclen); 2554 err = ceph_pagelist_append(pagelist, &rec, reclen);
2554 } 2555 }
2556
2557 recon_state->nr_caps++;
2555out_free: 2558out_free:
2556 kfree(path); 2559 kfree(path);
2557out_dput: 2560out_dput:
@@ -2579,6 +2582,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
2579 struct rb_node *p; 2582 struct rb_node *p;
2580 int mds = session->s_mds; 2583 int mds = session->s_mds;
2581 int err = -ENOMEM; 2584 int err = -ENOMEM;
2585 int s_nr_caps;
2582 struct ceph_pagelist *pagelist; 2586 struct ceph_pagelist *pagelist;
2583 struct ceph_reconnect_state recon_state; 2587 struct ceph_reconnect_state recon_state;
2584 2588
@@ -2610,20 +2614,38 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
2610 dout("session %p state %s\n", session, 2614 dout("session %p state %s\n", session,
2611 session_state_name(session->s_state)); 2615 session_state_name(session->s_state));
2612 2616
2617 spin_lock(&session->s_gen_ttl_lock);
2618 session->s_cap_gen++;
2619 spin_unlock(&session->s_gen_ttl_lock);
2620
2621 spin_lock(&session->s_cap_lock);
2622 /*
2623 * notify __ceph_remove_cap() that we are composing cap reconnect.
2624 * If a cap get released before being added to the cap reconnect,
2625 * __ceph_remove_cap() should skip queuing cap release.
2626 */
2627 session->s_cap_reconnect = 1;
2613 /* drop old cap expires; we're about to reestablish that state */ 2628 /* drop old cap expires; we're about to reestablish that state */
2614 discard_cap_releases(mdsc, session); 2629 discard_cap_releases(mdsc, session);
2630 spin_unlock(&session->s_cap_lock);
2615 2631
2616 /* traverse this session's caps */ 2632 /* traverse this session's caps */
2617 err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps); 2633 s_nr_caps = session->s_nr_caps;
2634 err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
2618 if (err) 2635 if (err)
2619 goto fail; 2636 goto fail;
2620 2637
2638 recon_state.nr_caps = 0;
2621 recon_state.pagelist = pagelist; 2639 recon_state.pagelist = pagelist;
2622 recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK; 2640 recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK;
2623 err = iterate_session_caps(session, encode_caps_cb, &recon_state); 2641 err = iterate_session_caps(session, encode_caps_cb, &recon_state);
2624 if (err < 0) 2642 if (err < 0)
2625 goto fail; 2643 goto fail;
2626 2644
2645 spin_lock(&session->s_cap_lock);
2646 session->s_cap_reconnect = 0;
2647 spin_unlock(&session->s_cap_lock);
2648
2627 /* 2649 /*
2628 * snaprealms. we provide mds with the ino, seq (version), and 2650 * snaprealms. we provide mds with the ino, seq (version), and
2629 * parent for all of our realms. If the mds has any newer info, 2651 * parent for all of our realms. If the mds has any newer info,
@@ -2646,11 +2668,18 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
2646 2668
2647 if (recon_state.flock) 2669 if (recon_state.flock)
2648 reply->hdr.version = cpu_to_le16(2); 2670 reply->hdr.version = cpu_to_le16(2);
2649 if (pagelist->length) { 2671
2650 /* set up outbound data if we have any */ 2672 /* raced with cap release? */
2651 reply->hdr.data_len = cpu_to_le32(pagelist->length); 2673 if (s_nr_caps != recon_state.nr_caps) {
2652 ceph_msg_data_add_pagelist(reply, pagelist); 2674 struct page *page = list_first_entry(&pagelist->head,
2675 struct page, lru);
2676 __le32 *addr = kmap_atomic(page);
2677 *addr = cpu_to_le32(recon_state.nr_caps);
2678 kunmap_atomic(addr);
2653 } 2679 }
2680
2681 reply->hdr.data_len = cpu_to_le32(pagelist->length);
2682 ceph_msg_data_add_pagelist(reply, pagelist);
2654 ceph_con_send(&session->s_con, reply); 2683 ceph_con_send(&session->s_con, reply);
2655 2684
2656 mutex_unlock(&session->s_mutex); 2685 mutex_unlock(&session->s_mutex);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index c2a19fbbe517..4c053d099ae4 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -132,6 +132,7 @@ struct ceph_mds_session {
132 struct list_head s_caps; /* all caps issued by this session */ 132 struct list_head s_caps; /* all caps issued by this session */
133 int s_nr_caps, s_trim_caps; 133 int s_nr_caps, s_trim_caps;
134 int s_num_cap_releases; 134 int s_num_cap_releases;
135 int s_cap_reconnect;
135 struct list_head s_cap_releases; /* waiting cap_release messages */ 136 struct list_head s_cap_releases; /* waiting cap_release messages */
136 struct list_head s_cap_releases_done; /* ready to send */ 137 struct list_head s_cap_releases_done; /* ready to send */
137 struct ceph_cap *s_cap_iterator; 138 struct ceph_cap *s_cap_iterator;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 6014b0a3c405..ef4ac38bb614 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -741,13 +741,7 @@ extern int ceph_add_cap(struct inode *inode,
741 int fmode, unsigned issued, unsigned wanted, 741 int fmode, unsigned issued, unsigned wanted,
742 unsigned cap, unsigned seq, u64 realmino, int flags, 742 unsigned cap, unsigned seq, u64 realmino, int flags,
743 struct ceph_cap_reservation *caps_reservation); 743 struct ceph_cap_reservation *caps_reservation);
744extern void __ceph_remove_cap(struct ceph_cap *cap); 744extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
745static inline void ceph_remove_cap(struct ceph_cap *cap)
746{
747 spin_lock(&cap->ci->i_ceph_lock);
748 __ceph_remove_cap(cap);
749 spin_unlock(&cap->ci->i_ceph_lock);
750}
751extern void ceph_put_cap(struct ceph_mds_client *mdsc, 745extern void ceph_put_cap(struct ceph_mds_client *mdsc,
752 struct ceph_cap *cap); 746 struct ceph_cap *cap);
753 747
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index d9ea7ada1378..f918a998a087 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -384,6 +384,7 @@ struct smb_version_operations {
384 int (*clone_range)(const unsigned int, struct cifsFileInfo *src_file, 384 int (*clone_range)(const unsigned int, struct cifsFileInfo *src_file,
385 struct cifsFileInfo *target_file, u64 src_off, u64 len, 385 struct cifsFileInfo *target_file, u64 src_off, u64 len,
386 u64 dest_off); 386 u64 dest_off);
387 int (*validate_negotiate)(const unsigned int, struct cifs_tcon *);
387}; 388};
388 389
389struct smb_version_values { 390struct smb_version_values {
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 409b45eefe70..77492301cc2b 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -26,13 +26,15 @@
26#include <linux/mount.h> 26#include <linux/mount.h>
27#include <linux/mm.h> 27#include <linux/mm.h>
28#include <linux/pagemap.h> 28#include <linux/pagemap.h>
29#include <linux/btrfs.h>
30#include "cifspdu.h" 29#include "cifspdu.h"
31#include "cifsglob.h" 30#include "cifsglob.h"
32#include "cifsproto.h" 31#include "cifsproto.h"
33#include "cifs_debug.h" 32#include "cifs_debug.h"
34#include "cifsfs.h" 33#include "cifsfs.h"
35 34
35#define CIFS_IOCTL_MAGIC 0xCF
36#define CIFS_IOC_COPYCHUNK_FILE _IOW(CIFS_IOCTL_MAGIC, 3, int)
37
36static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file, 38static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
37 unsigned long srcfd, u64 off, u64 len, u64 destoff) 39 unsigned long srcfd, u64 off, u64 len, u64 destoff)
38{ 40{
@@ -213,7 +215,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
213 cifs_dbg(FYI, "set compress flag rc %d\n", rc); 215 cifs_dbg(FYI, "set compress flag rc %d\n", rc);
214 } 216 }
215 break; 217 break;
216 case BTRFS_IOC_CLONE: 218 case CIFS_IOC_COPYCHUNK_FILE:
217 rc = cifs_ioctl_clone(xid, filep, arg, 0, 0, 0); 219 rc = cifs_ioctl_clone(xid, filep, arg, 0, 0, 0);
218 break; 220 break;
219 default: 221 default:
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 11dde4b24f8a..757da3e54d3d 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -532,7 +532,10 @@ smb2_clone_range(const unsigned int xid,
532 int rc; 532 int rc;
533 unsigned int ret_data_len; 533 unsigned int ret_data_len;
534 struct copychunk_ioctl *pcchunk; 534 struct copychunk_ioctl *pcchunk;
535 char *retbuf = NULL; 535 struct copychunk_ioctl_rsp *retbuf = NULL;
536 struct cifs_tcon *tcon;
537 int chunks_copied = 0;
538 bool chunk_sizes_updated = false;
536 539
537 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL); 540 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
538 541
@@ -547,27 +550,96 @@ smb2_clone_range(const unsigned int xid,
547 550
548 /* Note: request_res_key sets res_key null only if rc !=0 */ 551 /* Note: request_res_key sets res_key null only if rc !=0 */
549 if (rc) 552 if (rc)
550 return rc; 553 goto cchunk_out;
551 554
552 /* For now array only one chunk long, will make more flexible later */ 555 /* For now array only one chunk long, will make more flexible later */
553 pcchunk->ChunkCount = __constant_cpu_to_le32(1); 556 pcchunk->ChunkCount = __constant_cpu_to_le32(1);
554 pcchunk->Reserved = 0; 557 pcchunk->Reserved = 0;
555 pcchunk->SourceOffset = cpu_to_le64(src_off);
556 pcchunk->TargetOffset = cpu_to_le64(dest_off);
557 pcchunk->Length = cpu_to_le32(len);
558 pcchunk->Reserved2 = 0; 558 pcchunk->Reserved2 = 0;
559 559
560 /* Request that server copy to target from src file identified by key */ 560 tcon = tlink_tcon(trgtfile->tlink);
561 rc = SMB2_ioctl(xid, tlink_tcon(trgtfile->tlink),
562 trgtfile->fid.persistent_fid,
563 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
564 true /* is_fsctl */, (char *)pcchunk,
565 sizeof(struct copychunk_ioctl), &retbuf, &ret_data_len);
566 561
567 /* BB need to special case rc = EINVAL to alter chunk size */ 562 while (len > 0) {
563 pcchunk->SourceOffset = cpu_to_le64(src_off);
564 pcchunk->TargetOffset = cpu_to_le64(dest_off);
565 pcchunk->Length =
566 cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
568 567
569 cifs_dbg(FYI, "rc %d data length out %d\n", rc, ret_data_len); 568 /* Request server copy to target from src identified by key */
569 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
570 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
571 true /* is_fsctl */, (char *)pcchunk,
572 sizeof(struct copychunk_ioctl), (char **)&retbuf,
573 &ret_data_len);
574 if (rc == 0) {
575 if (ret_data_len !=
576 sizeof(struct copychunk_ioctl_rsp)) {
577 cifs_dbg(VFS, "invalid cchunk response size\n");
578 rc = -EIO;
579 goto cchunk_out;
580 }
581 if (retbuf->TotalBytesWritten == 0) {
582 cifs_dbg(FYI, "no bytes copied\n");
583 rc = -EIO;
584 goto cchunk_out;
585 }
586 /*
587 * Check if server claimed to write more than we asked
588 */
589 if (le32_to_cpu(retbuf->TotalBytesWritten) >
590 le32_to_cpu(pcchunk->Length)) {
591 cifs_dbg(VFS, "invalid copy chunk response\n");
592 rc = -EIO;
593 goto cchunk_out;
594 }
595 if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
596 cifs_dbg(VFS, "invalid num chunks written\n");
597 rc = -EIO;
598 goto cchunk_out;
599 }
600 chunks_copied++;
601
602 src_off += le32_to_cpu(retbuf->TotalBytesWritten);
603 dest_off += le32_to_cpu(retbuf->TotalBytesWritten);
604 len -= le32_to_cpu(retbuf->TotalBytesWritten);
605
606 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %d\n",
607 le32_to_cpu(retbuf->ChunksWritten),
608 le32_to_cpu(retbuf->ChunkBytesWritten),
609 le32_to_cpu(retbuf->TotalBytesWritten));
610 } else if (rc == -EINVAL) {
611 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
612 goto cchunk_out;
613
614 cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
615 le32_to_cpu(retbuf->ChunksWritten),
616 le32_to_cpu(retbuf->ChunkBytesWritten),
617 le32_to_cpu(retbuf->TotalBytesWritten));
618
619 /*
620 * Check if this is the first request using these sizes,
621 * (ie check if copy succeed once with original sizes
622 * and check if the server gave us different sizes after
623 * we already updated max sizes on previous request).
624 * if not then why is the server returning an error now
625 */
626 if ((chunks_copied != 0) || chunk_sizes_updated)
627 goto cchunk_out;
628
629 /* Check that server is not asking us to grow size */
630 if (le32_to_cpu(retbuf->ChunkBytesWritten) <
631 tcon->max_bytes_chunk)
632 tcon->max_bytes_chunk =
633 le32_to_cpu(retbuf->ChunkBytesWritten);
634 else
635 goto cchunk_out; /* server gave us bogus size */
636
637 /* No need to change MaxChunks since already set to 1 */
638 chunk_sizes_updated = true;
639 }
640 }
570 641
642cchunk_out:
571 kfree(pcchunk); 643 kfree(pcchunk);
572 return rc; 644 return rc;
573} 645}
@@ -1247,6 +1319,7 @@ struct smb_version_operations smb30_operations = {
1247 .create_lease_buf = smb3_create_lease_buf, 1319 .create_lease_buf = smb3_create_lease_buf,
1248 .parse_lease_buf = smb3_parse_lease_buf, 1320 .parse_lease_buf = smb3_parse_lease_buf,
1249 .clone_range = smb2_clone_range, 1321 .clone_range = smb2_clone_range,
1322 .validate_negotiate = smb3_validate_negotiate,
1250}; 1323};
1251 1324
1252struct smb_version_values smb20_values = { 1325struct smb_version_values smb20_values = {
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index d65270c290a1..2013234b73ad 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -454,6 +454,81 @@ neg_exit:
454 return rc; 454 return rc;
455} 455}
456 456
457int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
458{
459 int rc = 0;
460 struct validate_negotiate_info_req vneg_inbuf;
461 struct validate_negotiate_info_rsp *pneg_rsp;
462 u32 rsplen;
463
464 cifs_dbg(FYI, "validate negotiate\n");
465
466 /*
467 * validation ioctl must be signed, so no point sending this if we
468 * can not sign it. We could eventually change this to selectively
469 * sign just this, the first and only signed request on a connection.
470 * This is good enough for now since a user who wants better security
471 * would also enable signing on the mount. Having validation of
472 * negotiate info for signed connections helps reduce attack vectors
473 */
474 if (tcon->ses->server->sign == false)
475 return 0; /* validation requires signing */
476
477 vneg_inbuf.Capabilities =
478 cpu_to_le32(tcon->ses->server->vals->req_capabilities);
479 memcpy(vneg_inbuf.Guid, cifs_client_guid, SMB2_CLIENT_GUID_SIZE);
480
481 if (tcon->ses->sign)
482 vneg_inbuf.SecurityMode =
483 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
484 else if (global_secflags & CIFSSEC_MAY_SIGN)
485 vneg_inbuf.SecurityMode =
486 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
487 else
488 vneg_inbuf.SecurityMode = 0;
489
490 vneg_inbuf.DialectCount = cpu_to_le16(1);
491 vneg_inbuf.Dialects[0] =
492 cpu_to_le16(tcon->ses->server->vals->protocol_id);
493
494 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
495 FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
496 (char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req),
497 (char **)&pneg_rsp, &rsplen);
498
499 if (rc != 0) {
500 cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
501 return -EIO;
502 }
503
504 if (rsplen != sizeof(struct validate_negotiate_info_rsp)) {
505 cifs_dbg(VFS, "invalid size of protocol negotiate response\n");
506 return -EIO;
507 }
508
509 /* check validate negotiate info response matches what we got earlier */
510 if (pneg_rsp->Dialect !=
511 cpu_to_le16(tcon->ses->server->vals->protocol_id))
512 goto vneg_out;
513
514 if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
515 goto vneg_out;
516
517 /* do not validate server guid because not saved at negprot time yet */
518
519 if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
520 SMB2_LARGE_FILES) != tcon->ses->server->capabilities)
521 goto vneg_out;
522
523 /* validate negotiate successful */
524 cifs_dbg(FYI, "validate negotiate info successful\n");
525 return 0;
526
527vneg_out:
528 cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
529 return -EIO;
530}
531
457int 532int
458SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, 533SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
459 const struct nls_table *nls_cp) 534 const struct nls_table *nls_cp)
@@ -829,6 +904,8 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
829 ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0)) 904 ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
830 cifs_dbg(VFS, "DFS capability contradicts DFS flag\n"); 905 cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");
831 init_copy_chunk_defaults(tcon); 906 init_copy_chunk_defaults(tcon);
907 if (tcon->ses->server->ops->validate_negotiate)
908 rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
832tcon_exit: 909tcon_exit:
833 free_rsp_buf(resp_buftype, rsp); 910 free_rsp_buf(resp_buftype, rsp);
834 kfree(unc_path); 911 kfree(unc_path);
@@ -1214,10 +1291,17 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
1214 rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0); 1291 rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
1215 rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base; 1292 rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;
1216 1293
1217 if (rc != 0) { 1294 if ((rc != 0) && (rc != -EINVAL)) {
1218 if (tcon) 1295 if (tcon)
1219 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); 1296 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
1220 goto ioctl_exit; 1297 goto ioctl_exit;
1298 } else if (rc == -EINVAL) {
1299 if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
1300 (opcode != FSCTL_SRV_COPYCHUNK)) {
1301 if (tcon)
1302 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
1303 goto ioctl_exit;
1304 }
1221 } 1305 }
1222 1306
1223 /* check if caller wants to look at return data or just return rc */ 1307 /* check if caller wants to look at return data or just return rc */
@@ -2154,11 +2238,9 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
2154 rc = SendReceive2(xid, ses, iov, num, &resp_buftype, 0); 2238 rc = SendReceive2(xid, ses, iov, num, &resp_buftype, 0);
2155 rsp = (struct smb2_set_info_rsp *)iov[0].iov_base; 2239 rsp = (struct smb2_set_info_rsp *)iov[0].iov_base;
2156 2240
2157 if (rc != 0) { 2241 if (rc != 0)
2158 cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE); 2242 cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
2159 goto out; 2243
2160 }
2161out:
2162 free_rsp_buf(resp_buftype, rsp); 2244 free_rsp_buf(resp_buftype, rsp);
2163 kfree(iov); 2245 kfree(iov);
2164 return rc; 2246 return rc;
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index f88320bbb477..2022c542ea3a 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -577,13 +577,19 @@ struct copychunk_ioctl_rsp {
577 __le32 TotalBytesWritten; 577 __le32 TotalBytesWritten;
578} __packed; 578} __packed;
579 579
580/* Response and Request are the same format */ 580struct validate_negotiate_info_req {
581struct validate_negotiate_info {
582 __le32 Capabilities; 581 __le32 Capabilities;
583 __u8 Guid[SMB2_CLIENT_GUID_SIZE]; 582 __u8 Guid[SMB2_CLIENT_GUID_SIZE];
584 __le16 SecurityMode; 583 __le16 SecurityMode;
585 __le16 DialectCount; 584 __le16 DialectCount;
586 __le16 Dialect[1]; 585 __le16 Dialects[1]; /* dialect (someday maybe list) client asked for */
586} __packed;
587
588struct validate_negotiate_info_rsp {
589 __le32 Capabilities;
590 __u8 Guid[SMB2_CLIENT_GUID_SIZE];
591 __le16 SecurityMode;
592 __le16 Dialect; /* Dialect in use for the connection */
587} __packed; 593} __packed;
588 594
589#define RSS_CAPABLE 0x00000001 595#define RSS_CAPABLE 0x00000001
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index b4eea105b08c..93adc64666f3 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -162,5 +162,6 @@ extern int smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
162 struct smb2_lock_element *buf); 162 struct smb2_lock_element *buf);
163extern int SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon, 163extern int SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
164 __u8 *lease_key, const __le32 lease_state); 164 __u8 *lease_key, const __le32 lease_state);
165extern int smb3_validate_negotiate(const unsigned int, struct cifs_tcon *);
165 166
166#endif /* _SMB2PROTO_H */ 167#endif /* _SMB2PROTO_H */
diff --git a/fs/cifs/smbfsctl.h b/fs/cifs/smbfsctl.h
index a4b2391fe66e..0e538b5c9622 100644
--- a/fs/cifs/smbfsctl.h
+++ b/fs/cifs/smbfsctl.h
@@ -90,7 +90,7 @@
90#define FSCTL_LMR_REQUEST_RESILIENCY 0x001401D4 /* BB add struct */ 90#define FSCTL_LMR_REQUEST_RESILIENCY 0x001401D4 /* BB add struct */
91#define FSCTL_LMR_GET_LINK_TRACK_INF 0x001400E8 /* BB add struct */ 91#define FSCTL_LMR_GET_LINK_TRACK_INF 0x001400E8 /* BB add struct */
92#define FSCTL_LMR_SET_LINK_TRACK_INF 0x001400EC /* BB add struct */ 92#define FSCTL_LMR_SET_LINK_TRACK_INF 0x001400EC /* BB add struct */
93#define FSCTL_VALIDATE_NEGOTIATE_INFO 0x00140204 /* BB add struct */ 93#define FSCTL_VALIDATE_NEGOTIATE_INFO 0x00140204
94/* Perform server-side data movement */ 94/* Perform server-side data movement */
95#define FSCTL_SRV_COPYCHUNK 0x001440F2 95#define FSCTL_SRV_COPYCHUNK 0x001440F2
96#define FSCTL_SRV_COPYCHUNK_WRITE 0x001480F2 96#define FSCTL_SRV_COPYCHUNK_WRITE 0x001480F2
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 277bd1be21fd..e081acbac2e7 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -56,29 +56,28 @@ static void configfs_d_iput(struct dentry * dentry,
56 struct configfs_dirent *sd = dentry->d_fsdata; 56 struct configfs_dirent *sd = dentry->d_fsdata;
57 57
58 if (sd) { 58 if (sd) {
59 BUG_ON(sd->s_dentry != dentry);
60 /* Coordinate with configfs_readdir */ 59 /* Coordinate with configfs_readdir */
61 spin_lock(&configfs_dirent_lock); 60 spin_lock(&configfs_dirent_lock);
62 sd->s_dentry = NULL; 61 /* Coordinate with configfs_attach_attr where will increase
62 * sd->s_count and update sd->s_dentry to new allocated one.
63 * Only set sd->dentry to null when this dentry is the only
64 * sd owner.
65 * If not do so, configfs_d_iput may run just after
66 * configfs_attach_attr and set sd->s_dentry to null
67 * even it's still in use.
68 */
69 if (atomic_read(&sd->s_count) <= 2)
70 sd->s_dentry = NULL;
71
63 spin_unlock(&configfs_dirent_lock); 72 spin_unlock(&configfs_dirent_lock);
64 configfs_put(sd); 73 configfs_put(sd);
65 } 74 }
66 iput(inode); 75 iput(inode);
67} 76}
68 77
69/*
70 * We _must_ delete our dentries on last dput, as the chain-to-parent
71 * behavior is required to clear the parents of default_groups.
72 */
73static int configfs_d_delete(const struct dentry *dentry)
74{
75 return 1;
76}
77
78const struct dentry_operations configfs_dentry_ops = { 78const struct dentry_operations configfs_dentry_ops = {
79 .d_iput = configfs_d_iput, 79 .d_iput = configfs_d_iput,
80 /* simple_delete_dentry() isn't exported */ 80 .d_delete = always_delete_dentry,
81 .d_delete = configfs_d_delete,
82}; 81};
83 82
84#ifdef CONFIG_LOCKDEP 83#ifdef CONFIG_LOCKDEP
@@ -426,8 +425,11 @@ static int configfs_attach_attr(struct configfs_dirent * sd, struct dentry * den
426 struct configfs_attribute * attr = sd->s_element; 425 struct configfs_attribute * attr = sd->s_element;
427 int error; 426 int error;
428 427
428 spin_lock(&configfs_dirent_lock);
429 dentry->d_fsdata = configfs_get(sd); 429 dentry->d_fsdata = configfs_get(sd);
430 sd->s_dentry = dentry; 430 sd->s_dentry = dentry;
431 spin_unlock(&configfs_dirent_lock);
432
431 error = configfs_create(dentry, (attr->ca_mode & S_IALLUGO) | S_IFREG, 433 error = configfs_create(dentry, (attr->ca_mode & S_IALLUGO) | S_IFREG,
432 configfs_init_file); 434 configfs_init_file);
433 if (error) { 435 if (error) {
diff --git a/fs/coredump.c b/fs/coredump.c
index 62406b6959b6..bc3fbcd32558 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -695,7 +695,7 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
695 while (nr) { 695 while (nr) {
696 if (dump_interrupted()) 696 if (dump_interrupted())
697 return 0; 697 return 0;
698 n = vfs_write(file, addr, nr, &pos); 698 n = __kernel_write(file, addr, nr, &pos);
699 if (n <= 0) 699 if (n <= 0)
700 return 0; 700 return 0;
701 file->f_pos = pos; 701 file->f_pos = pos;
@@ -733,7 +733,7 @@ int dump_align(struct coredump_params *cprm, int align)
733{ 733{
734 unsigned mod = cprm->written & (align - 1); 734 unsigned mod = cprm->written & (align - 1);
735 if (align & (align - 1)) 735 if (align & (align - 1))
736 return -EINVAL; 736 return 0;
737 return mod ? dump_skip(cprm, align - mod) : 0; 737 return mod ? dump_skip(cprm, align - mod) : 1;
738} 738}
739EXPORT_SYMBOL(dump_align); 739EXPORT_SYMBOL(dump_align);
diff --git a/fs/dcache.c b/fs/dcache.c
index 0a38ef8d7f00..6055d61811d3 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -88,35 +88,6 @@ EXPORT_SYMBOL(rename_lock);
88 88
89static struct kmem_cache *dentry_cache __read_mostly; 89static struct kmem_cache *dentry_cache __read_mostly;
90 90
91/**
92 * read_seqbegin_or_lock - begin a sequence number check or locking block
93 * @lock: sequence lock
94 * @seq : sequence number to be checked
95 *
96 * First try it once optimistically without taking the lock. If that fails,
97 * take the lock. The sequence number is also used as a marker for deciding
98 * whether to be a reader (even) or writer (odd).
99 * N.B. seq must be initialized to an even number to begin with.
100 */
101static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
102{
103 if (!(*seq & 1)) /* Even */
104 *seq = read_seqbegin(lock);
105 else /* Odd */
106 read_seqlock_excl(lock);
107}
108
109static inline int need_seqretry(seqlock_t *lock, int seq)
110{
111 return !(seq & 1) && read_seqretry(lock, seq);
112}
113
114static inline void done_seqretry(seqlock_t *lock, int seq)
115{
116 if (seq & 1)
117 read_sequnlock_excl(lock);
118}
119
120/* 91/*
121 * This is the single most critical data structure when it comes 92 * This is the single most critical data structure when it comes
122 * to the dcache: the hashtable for lookups. Somebody should try 93 * to the dcache: the hashtable for lookups. Somebody should try
@@ -125,8 +96,6 @@ static inline void done_seqretry(seqlock_t *lock, int seq)
125 * This hash-function tries to avoid losing too many bits of hash 96 * This hash-function tries to avoid losing too many bits of hash
126 * information, yet avoid using a prime hash-size or similar. 97 * information, yet avoid using a prime hash-size or similar.
127 */ 98 */
128#define D_HASHBITS d_hash_shift
129#define D_HASHMASK d_hash_mask
130 99
131static unsigned int d_hash_mask __read_mostly; 100static unsigned int d_hash_mask __read_mostly;
132static unsigned int d_hash_shift __read_mostly; 101static unsigned int d_hash_shift __read_mostly;
@@ -137,8 +106,8 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
137 unsigned int hash) 106 unsigned int hash)
138{ 107{
139 hash += (unsigned long) parent / L1_CACHE_BYTES; 108 hash += (unsigned long) parent / L1_CACHE_BYTES;
140 hash = hash + (hash >> D_HASHBITS); 109 hash = hash + (hash >> d_hash_shift);
141 return dentry_hashtable + (hash & D_HASHMASK); 110 return dentry_hashtable + (hash & d_hash_mask);
142} 111}
143 112
144/* Statistics gathering. */ 113/* Statistics gathering. */
@@ -223,7 +192,7 @@ static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char
223 if (!tcount) 192 if (!tcount)
224 return 0; 193 return 0;
225 } 194 }
226 mask = ~(~0ul << tcount*8); 195 mask = bytemask_from_count(tcount);
227 return unlikely(!!((a ^ b) & mask)); 196 return unlikely(!!((a ^ b) & mask));
228} 197}
229 198
@@ -469,7 +438,7 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
469{ 438{
470 list_del(&dentry->d_u.d_child); 439 list_del(&dentry->d_u.d_child);
471 /* 440 /*
472 * Inform try_to_ascend() that we are no longer attached to the 441 * Inform d_walk() that we are no longer attached to the
473 * dentry tree 442 * dentry tree
474 */ 443 */
475 dentry->d_flags |= DCACHE_DENTRY_KILLED; 444 dentry->d_flags |= DCACHE_DENTRY_KILLED;
@@ -1069,34 +1038,6 @@ void shrink_dcache_sb(struct super_block *sb)
1069} 1038}
1070EXPORT_SYMBOL(shrink_dcache_sb); 1039EXPORT_SYMBOL(shrink_dcache_sb);
1071 1040
1072/*
1073 * This tries to ascend one level of parenthood, but
1074 * we can race with renaming, so we need to re-check
1075 * the parenthood after dropping the lock and check
1076 * that the sequence number still matches.
1077 */
1078static struct dentry *try_to_ascend(struct dentry *old, unsigned seq)
1079{
1080 struct dentry *new = old->d_parent;
1081
1082 rcu_read_lock();
1083 spin_unlock(&old->d_lock);
1084 spin_lock(&new->d_lock);
1085
1086 /*
1087 * might go back up the wrong parent if we have had a rename
1088 * or deletion
1089 */
1090 if (new != old->d_parent ||
1091 (old->d_flags & DCACHE_DENTRY_KILLED) ||
1092 need_seqretry(&rename_lock, seq)) {
1093 spin_unlock(&new->d_lock);
1094 new = NULL;
1095 }
1096 rcu_read_unlock();
1097 return new;
1098}
1099
1100/** 1041/**
1101 * enum d_walk_ret - action to talke during tree walk 1042 * enum d_walk_ret - action to talke during tree walk
1102 * @D_WALK_CONTINUE: contrinue walk 1043 * @D_WALK_CONTINUE: contrinue walk
@@ -1185,9 +1126,24 @@ resume:
1185 */ 1126 */
1186 if (this_parent != parent) { 1127 if (this_parent != parent) {
1187 struct dentry *child = this_parent; 1128 struct dentry *child = this_parent;
1188 this_parent = try_to_ascend(this_parent, seq); 1129 this_parent = child->d_parent;
1189 if (!this_parent) 1130
1131 rcu_read_lock();
1132 spin_unlock(&child->d_lock);
1133 spin_lock(&this_parent->d_lock);
1134
1135 /*
1136 * might go back up the wrong parent if we have had a rename
1137 * or deletion
1138 */
1139 if (this_parent != child->d_parent ||
1140 (child->d_flags & DCACHE_DENTRY_KILLED) ||
1141 need_seqretry(&rename_lock, seq)) {
1142 spin_unlock(&this_parent->d_lock);
1143 rcu_read_unlock();
1190 goto rename_retry; 1144 goto rename_retry;
1145 }
1146 rcu_read_unlock();
1191 next = child->d_u.d_child.next; 1147 next = child->d_u.d_child.next;
1192 goto resume; 1148 goto resume;
1193 } 1149 }
diff --git a/fs/dlm/netlink.c b/fs/dlm/netlink.c
index 60a327863b11..e7cfbaf8d0e2 100644
--- a/fs/dlm/netlink.c
+++ b/fs/dlm/netlink.c
@@ -74,14 +74,16 @@ static int user_cmd(struct sk_buff *skb, struct genl_info *info)
74 return 0; 74 return 0;
75} 75}
76 76
77static struct genl_ops dlm_nl_ops = { 77static struct genl_ops dlm_nl_ops[] = {
78 .cmd = DLM_CMD_HELLO, 78 {
79 .doit = user_cmd, 79 .cmd = DLM_CMD_HELLO,
80 .doit = user_cmd,
81 },
80}; 82};
81 83
82int __init dlm_netlink_init(void) 84int __init dlm_netlink_init(void)
83{ 85{
84 return genl_register_family_with_ops(&family, &dlm_nl_ops, 1); 86 return genl_register_family_with_ops(&family, dlm_nl_ops);
85} 87}
86 88
87void dlm_netlink_exit(void) 89void dlm_netlink_exit(void)
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 2229a74aeeed..b1eaa7a1f82c 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -313,11 +313,9 @@ static int ecryptfs_fasync(int fd, struct file *file, int flag)
313static long 313static long
314ecryptfs_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 314ecryptfs_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
315{ 315{
316 struct file *lower_file = NULL; 316 struct file *lower_file = ecryptfs_file_to_lower(file);
317 long rc = -ENOTTY; 317 long rc = -ENOTTY;
318 318
319 if (ecryptfs_file_to_private(file))
320 lower_file = ecryptfs_file_to_lower(file);
321 if (lower_file->f_op->unlocked_ioctl) 319 if (lower_file->f_op->unlocked_ioctl)
322 rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg); 320 rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
323 return rc; 321 return rc;
@@ -327,11 +325,9 @@ ecryptfs_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
327static long 325static long
328ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 326ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
329{ 327{
330 struct file *lower_file = NULL; 328 struct file *lower_file = ecryptfs_file_to_lower(file);
331 long rc = -ENOIOCTLCMD; 329 long rc = -ENOIOCTLCMD;
332 330
333 if (ecryptfs_file_to_private(file))
334 lower_file = ecryptfs_file_to_lower(file);
335 if (lower_file->f_op && lower_file->f_op->compat_ioctl) 331 if (lower_file->f_op && lower_file->f_op->compat_ioctl)
336 rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg); 332 rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
337 return rc; 333 return rc;
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index a8766b880c07..becc725a1953 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -83,19 +83,10 @@ static int efivarfs_d_hash(const struct dentry *dentry, struct qstr *qstr)
83 return 0; 83 return 0;
84} 84}
85 85
86/*
87 * Retaining negative dentries for an in-memory filesystem just wastes
88 * memory and lookup time: arrange for them to be deleted immediately.
89 */
90static int efivarfs_delete_dentry(const struct dentry *dentry)
91{
92 return 1;
93}
94
95static struct dentry_operations efivarfs_d_ops = { 86static struct dentry_operations efivarfs_d_ops = {
96 .d_compare = efivarfs_d_compare, 87 .d_compare = efivarfs_d_compare,
97 .d_hash = efivarfs_d_hash, 88 .d_hash = efivarfs_d_hash,
98 .d_delete = efivarfs_delete_dentry, 89 .d_delete = always_delete_dentry,
99}; 90};
100 91
101static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name) 92static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 79b65c3b9e87..8b5e2584c840 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1852,8 +1852,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
1852 goto error_tgt_fput; 1852 goto error_tgt_fput;
1853 1853
1854 /* Check if EPOLLWAKEUP is allowed */ 1854 /* Check if EPOLLWAKEUP is allowed */
1855 if ((epds.events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND)) 1855 ep_take_care_of_epollwakeup(&epds);
1856 epds.events &= ~EPOLLWAKEUP;
1857 1856
1858 /* 1857 /*
1859 * We have to check that the file structure underneath the file descriptor 1858 * We have to check that the file structure underneath the file descriptor
diff --git a/fs/exec.c b/fs/exec.c
index 977319fd77f3..7ea097f6b341 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1380,10 +1380,6 @@ int search_binary_handler(struct linux_binprm *bprm)
1380 if (retval) 1380 if (retval)
1381 return retval; 1381 return retval;
1382 1382
1383 retval = audit_bprm(bprm);
1384 if (retval)
1385 return retval;
1386
1387 retval = -ENOENT; 1383 retval = -ENOENT;
1388 retry: 1384 retry:
1389 read_lock(&binfmt_lock); 1385 read_lock(&binfmt_lock);
@@ -1431,6 +1427,7 @@ static int exec_binprm(struct linux_binprm *bprm)
1431 1427
1432 ret = search_binary_handler(bprm); 1428 ret = search_binary_handler(bprm);
1433 if (ret >= 0) { 1429 if (ret >= 0) {
1430 audit_bprm(bprm);
1434 trace_sched_process_exec(current, old_pid, bprm); 1431 trace_sched_process_exec(current, old_pid, bprm);
1435 ptrace_event(PTRACE_EVENT_EXEC, old_vpid); 1432 ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
1436 current->did_exec = 1; 1433 current->did_exec = 1;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index e66a8009aff1..c8420f7e4db6 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1899,7 +1899,8 @@ static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
1899 gi->nhash = 0; 1899 gi->nhash = 0;
1900 } 1900 }
1901 /* Skip entries for other sb and dead entries */ 1901 /* Skip entries for other sb and dead entries */
1902 } while (gi->sdp != gi->gl->gl_sbd || __lockref_is_dead(&gl->gl_lockref)); 1902 } while (gi->sdp != gi->gl->gl_sbd ||
1903 __lockref_is_dead(&gi->gl->gl_lockref));
1903 1904
1904 return 0; 1905 return 0;
1905} 1906}
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 1615df16cf4e..7119504159f1 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -1171,8 +1171,11 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
1171 if (d != NULL) 1171 if (d != NULL)
1172 dentry = d; 1172 dentry = d;
1173 if (dentry->d_inode) { 1173 if (dentry->d_inode) {
1174 if (!(*opened & FILE_OPENED)) 1174 if (!(*opened & FILE_OPENED)) {
1175 if (d == NULL)
1176 dget(dentry);
1175 return finish_no_open(file, dentry); 1177 return finish_no_open(file, dentry);
1178 }
1176 dput(d); 1179 dput(d);
1177 return 0; 1180 return 0;
1178 } 1181 }
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index c8423d6de6c3..2a6ba06bee6f 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -466,19 +466,19 @@ static void gdlm_cancel(struct gfs2_glock *gl)
466static void control_lvb_read(struct lm_lockstruct *ls, uint32_t *lvb_gen, 466static void control_lvb_read(struct lm_lockstruct *ls, uint32_t *lvb_gen,
467 char *lvb_bits) 467 char *lvb_bits)
468{ 468{
469 uint32_t gen; 469 __le32 gen;
470 memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE); 470 memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE);
471 memcpy(&gen, lvb_bits, sizeof(uint32_t)); 471 memcpy(&gen, lvb_bits, sizeof(__le32));
472 *lvb_gen = le32_to_cpu(gen); 472 *lvb_gen = le32_to_cpu(gen);
473} 473}
474 474
475static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen, 475static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen,
476 char *lvb_bits) 476 char *lvb_bits)
477{ 477{
478 uint32_t gen; 478 __le32 gen;
479 memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE); 479 memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE);
480 gen = cpu_to_le32(lvb_gen); 480 gen = cpu_to_le32(lvb_gen);
481 memcpy(ls->ls_control_lvb, &gen, sizeof(uint32_t)); 481 memcpy(ls->ls_control_lvb, &gen, sizeof(__le32));
482} 482}
483 483
484static int all_jid_bits_clear(char *lvb) 484static int all_jid_bits_clear(char *lvb)
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 453b50eaddec..98236d0df3ca 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -667,7 +667,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
667 struct buffer_head *bh; 667 struct buffer_head *bh;
668 struct page *page; 668 struct page *page;
669 void *kaddr, *ptr; 669 void *kaddr, *ptr;
670 struct gfs2_quota q, *qp; 670 struct gfs2_quota q;
671 int err, nbytes; 671 int err, nbytes;
672 u64 size; 672 u64 size;
673 673
@@ -683,28 +683,25 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
683 return err; 683 return err;
684 684
685 err = -EIO; 685 err = -EIO;
686 qp = &q; 686 be64_add_cpu(&q.qu_value, change);
687 qp->qu_value = be64_to_cpu(qp->qu_value); 687 qd->qd_qb.qb_value = q.qu_value;
688 qp->qu_value += change;
689 qp->qu_value = cpu_to_be64(qp->qu_value);
690 qd->qd_qb.qb_value = qp->qu_value;
691 if (fdq) { 688 if (fdq) {
692 if (fdq->d_fieldmask & FS_DQ_BSOFT) { 689 if (fdq->d_fieldmask & FS_DQ_BSOFT) {
693 qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift); 690 q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
694 qd->qd_qb.qb_warn = qp->qu_warn; 691 qd->qd_qb.qb_warn = q.qu_warn;
695 } 692 }
696 if (fdq->d_fieldmask & FS_DQ_BHARD) { 693 if (fdq->d_fieldmask & FS_DQ_BHARD) {
697 qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift); 694 q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
698 qd->qd_qb.qb_limit = qp->qu_limit; 695 qd->qd_qb.qb_limit = q.qu_limit;
699 } 696 }
700 if (fdq->d_fieldmask & FS_DQ_BCOUNT) { 697 if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
701 qp->qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift); 698 q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
702 qd->qd_qb.qb_value = qp->qu_value; 699 qd->qd_qb.qb_value = q.qu_value;
703 } 700 }
704 } 701 }
705 702
706 /* Write the quota into the quota file on disk */ 703 /* Write the quota into the quota file on disk */
707 ptr = qp; 704 ptr = &q;
708 nbytes = sizeof(struct gfs2_quota); 705 nbytes = sizeof(struct gfs2_quota);
709get_a_page: 706get_a_page:
710 page = find_or_create_page(mapping, index, GFP_NOFS); 707 page = find_or_create_page(mapping, index, GFP_NOFS);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 4d83abdd5635..c8d6161bd682 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1127,7 +1127,7 @@ int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
1127 rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK); 1127 rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
1128 rgd->rd_free_clone = rgd->rd_free; 1128 rgd->rd_free_clone = rgd->rd_free;
1129 } 1129 }
1130 if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) { 1130 if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
1131 rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd)); 1131 rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
1132 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, 1132 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
1133 rgd->rd_bits[0].bi_bh->b_data); 1133 rgd->rd_bits[0].bi_bh->b_data);
@@ -1161,7 +1161,7 @@ int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
1161 if (rgd->rd_flags & GFS2_RDF_UPTODATE) 1161 if (rgd->rd_flags & GFS2_RDF_UPTODATE)
1162 return 0; 1162 return 0;
1163 1163
1164 if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) 1164 if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
1165 return gfs2_rgrp_bh_get(rgd); 1165 return gfs2_rgrp_bh_get(rgd);
1166 1166
1167 rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags); 1167 rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index b51a6079108d..e9a97a0d4314 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -24,13 +24,6 @@ struct hfsplus_wd {
24 u16 embed_count; 24 u16 embed_count;
25}; 25};
26 26
27static void hfsplus_end_io_sync(struct bio *bio, int err)
28{
29 if (err)
30 clear_bit(BIO_UPTODATE, &bio->bi_flags);
31 complete(bio->bi_private);
32}
33
34/* 27/*
35 * hfsplus_submit_bio - Perfrom block I/O 28 * hfsplus_submit_bio - Perfrom block I/O
36 * @sb: super block of volume for I/O 29 * @sb: super block of volume for I/O
@@ -53,7 +46,6 @@ static void hfsplus_end_io_sync(struct bio *bio, int err)
53int hfsplus_submit_bio(struct super_block *sb, sector_t sector, 46int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
54 void *buf, void **data, int rw) 47 void *buf, void **data, int rw)
55{ 48{
56 DECLARE_COMPLETION_ONSTACK(wait);
57 struct bio *bio; 49 struct bio *bio;
58 int ret = 0; 50 int ret = 0;
59 u64 io_size; 51 u64 io_size;
@@ -73,8 +65,6 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
73 bio = bio_alloc(GFP_NOIO, 1); 65 bio = bio_alloc(GFP_NOIO, 1);
74 bio->bi_sector = sector; 66 bio->bi_sector = sector;
75 bio->bi_bdev = sb->s_bdev; 67 bio->bi_bdev = sb->s_bdev;
76 bio->bi_end_io = hfsplus_end_io_sync;
77 bio->bi_private = &wait;
78 68
79 if (!(rw & WRITE) && data) 69 if (!(rw & WRITE) && data)
80 *data = (u8 *)buf + offset; 70 *data = (u8 *)buf + offset;
@@ -93,12 +83,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
93 buf = (u8 *)buf + len; 83 buf = (u8 *)buf + len;
94 } 84 }
95 85
96 submit_bio(rw, bio); 86 ret = submit_bio_wait(rw, bio);
97 wait_for_completion(&wait);
98
99 if (!bio_flagged(bio, BIO_UPTODATE))
100 ret = -EIO;
101
102out: 87out:
103 bio_put(bio); 88 bio_put(bio);
104 return ret < 0 ? ret : 0; 89 return ret < 0 ? ret : 0;
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 25437280a207..db23ce1bd903 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -33,15 +33,6 @@ static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
33 33
34#define FILE_HOSTFS_I(file) HOSTFS_I(file_inode(file)) 34#define FILE_HOSTFS_I(file) HOSTFS_I(file_inode(file))
35 35
36static int hostfs_d_delete(const struct dentry *dentry)
37{
38 return 1;
39}
40
41static const struct dentry_operations hostfs_dentry_ops = {
42 .d_delete = hostfs_d_delete,
43};
44
45/* Changed in hostfs_args before the kernel starts running */ 36/* Changed in hostfs_args before the kernel starts running */
46static char *root_ino = ""; 37static char *root_ino = "";
47static int append = 0; 38static int append = 0;
@@ -925,7 +916,7 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
925 sb->s_blocksize_bits = 10; 916 sb->s_blocksize_bits = 10;
926 sb->s_magic = HOSTFS_SUPER_MAGIC; 917 sb->s_magic = HOSTFS_SUPER_MAGIC;
927 sb->s_op = &hostfs_sbops; 918 sb->s_op = &hostfs_sbops;
928 sb->s_d_op = &hostfs_dentry_ops; 919 sb->s_d_op = &simple_dentry_operations;
929 sb->s_maxbytes = MAX_LFS_FILESIZE; 920 sb->s_maxbytes = MAX_LFS_FILESIZE;
930 921
931 /* NULL is printed as <NULL> by sprintf: avoid that. */ 922 /* NULL is printed as <NULL> by sprintf: avoid that. */
diff --git a/fs/libfs.c b/fs/libfs.c
index 5de06947ba5e..a1844244246f 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -47,10 +47,16 @@ EXPORT_SYMBOL(simple_statfs);
47 * Retaining negative dentries for an in-memory filesystem just wastes 47 * Retaining negative dentries for an in-memory filesystem just wastes
48 * memory and lookup time: arrange for them to be deleted immediately. 48 * memory and lookup time: arrange for them to be deleted immediately.
49 */ 49 */
50static int simple_delete_dentry(const struct dentry *dentry) 50int always_delete_dentry(const struct dentry *dentry)
51{ 51{
52 return 1; 52 return 1;
53} 53}
54EXPORT_SYMBOL(always_delete_dentry);
55
56const struct dentry_operations simple_dentry_operations = {
57 .d_delete = always_delete_dentry,
58};
59EXPORT_SYMBOL(simple_dentry_operations);
54 60
55/* 61/*
56 * Lookup the data. This is trivial - if the dentry didn't already 62 * Lookup the data. This is trivial - if the dentry didn't already
@@ -58,10 +64,6 @@ static int simple_delete_dentry(const struct dentry *dentry)
58 */ 64 */
59struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) 65struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
60{ 66{
61 static const struct dentry_operations simple_dentry_operations = {
62 .d_delete = simple_delete_dentry,
63 };
64
65 if (dentry->d_name.len > NAME_MAX) 67 if (dentry->d_name.len > NAME_MAX)
66 return ERR_PTR(-ENAMETOOLONG); 68 return ERR_PTR(-ENAMETOOLONG);
67 if (!dentry->d_sb->s_d_op) 69 if (!dentry->d_sb->s_d_op)
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 550475ca6a0e..0f95f0d0b313 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -14,16 +14,10 @@
14 14
15#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1)) 15#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
16 16
17static void request_complete(struct bio *bio, int err)
18{
19 complete((struct completion *)bio->bi_private);
20}
21
22static int sync_request(struct page *page, struct block_device *bdev, int rw) 17static int sync_request(struct page *page, struct block_device *bdev, int rw)
23{ 18{
24 struct bio bio; 19 struct bio bio;
25 struct bio_vec bio_vec; 20 struct bio_vec bio_vec;
26 struct completion complete;
27 21
28 bio_init(&bio); 22 bio_init(&bio);
29 bio.bi_max_vecs = 1; 23 bio.bi_max_vecs = 1;
@@ -35,13 +29,8 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
35 bio.bi_size = PAGE_SIZE; 29 bio.bi_size = PAGE_SIZE;
36 bio.bi_bdev = bdev; 30 bio.bi_bdev = bdev;
37 bio.bi_sector = page->index * (PAGE_SIZE >> 9); 31 bio.bi_sector = page->index * (PAGE_SIZE >> 9);
38 init_completion(&complete);
39 bio.bi_private = &complete;
40 bio.bi_end_io = request_complete;
41 32
42 submit_bio(rw, &bio); 33 return submit_bio_wait(rw, &bio);
43 wait_for_completion(&complete);
44 return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
45} 34}
46 35
47static int bdev_readpage(void *_sb, struct page *page) 36static int bdev_readpage(void *_sb, struct page *page)
diff --git a/fs/namei.c b/fs/namei.c
index e029a4cbff7d..3531deebad30 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -513,8 +513,7 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
513 513
514 if (!lockref_get_not_dead(&parent->d_lockref)) { 514 if (!lockref_get_not_dead(&parent->d_lockref)) {
515 nd->path.dentry = NULL; 515 nd->path.dentry = NULL;
516 rcu_read_unlock(); 516 goto out;
517 return -ECHILD;
518 } 517 }
519 518
520 /* 519 /*
@@ -1599,11 +1598,6 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
1599 * do a "get_unaligned()" if this helps and is sufficiently 1598 * do a "get_unaligned()" if this helps and is sufficiently
1600 * fast. 1599 * fast.
1601 * 1600 *
1602 * - Little-endian machines (so that we can generate the mask
1603 * of low bytes efficiently). Again, we *could* do a byte
1604 * swapping load on big-endian architectures if that is not
1605 * expensive enough to make the optimization worthless.
1606 *
1607 * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we 1601 * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
1608 * do not trap on the (extremely unlikely) case of a page 1602 * do not trap on the (extremely unlikely) case of a page
1609 * crossing operation. 1603 * crossing operation.
@@ -1647,7 +1641,7 @@ unsigned int full_name_hash(const unsigned char *name, unsigned int len)
1647 if (!len) 1641 if (!len)
1648 goto done; 1642 goto done;
1649 } 1643 }
1650 mask = ~(~0ul << len*8); 1644 mask = bytemask_from_count(len);
1651 hash += mask & a; 1645 hash += mask & a;
1652done: 1646done:
1653 return fold_hash(hash); 1647 return fold_hash(hash);
@@ -2435,6 +2429,7 @@ static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
2435 */ 2429 */
2436static inline int may_create(struct inode *dir, struct dentry *child) 2430static inline int may_create(struct inode *dir, struct dentry *child)
2437{ 2431{
2432 audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
2438 if (child->d_inode) 2433 if (child->d_inode)
2439 return -EEXIST; 2434 return -EEXIST;
2440 if (IS_DEADDIR(dir)) 2435 if (IS_DEADDIR(dir))
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index 8485978993e8..9838fb020473 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -36,6 +36,7 @@
36#include <linux/nfs_fs.h> 36#include <linux/nfs_fs.h>
37#include <linux/sunrpc/rpc_pipe_fs.h> 37#include <linux/sunrpc/rpc_pipe_fs.h>
38 38
39#include "../nfs4_fs.h"
39#include "../pnfs.h" 40#include "../pnfs.h"
40#include "../netns.h" 41#include "../netns.h"
41 42
diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c
index 9c3e117c3ed1..4d0161442565 100644
--- a/fs/nfs/blocklayout/extents.c
+++ b/fs/nfs/blocklayout/extents.c
@@ -44,7 +44,7 @@
44static inline sector_t normalize(sector_t s, int base) 44static inline sector_t normalize(sector_t s, int base)
45{ 45{
46 sector_t tmp = s; /* Since do_div modifies its argument */ 46 sector_t tmp = s; /* Since do_div modifies its argument */
47 return s - do_div(tmp, base); 47 return s - sector_div(tmp, base);
48} 48}
49 49
50static inline sector_t normalize_up(sector_t s, int base) 50static inline sector_t normalize_up(sector_t s, int base)
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index fc0f95ec7358..d25f10fb4926 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -46,7 +46,9 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen,
46#include <linux/sunrpc/cache.h> 46#include <linux/sunrpc/cache.h>
47#include <linux/sunrpc/svcauth.h> 47#include <linux/sunrpc/svcauth.h>
48#include <linux/sunrpc/rpc_pipe_fs.h> 48#include <linux/sunrpc/rpc_pipe_fs.h>
49#include <linux/nfs_fs.h>
49 50
51#include "nfs4_fs.h"
50#include "dns_resolve.h" 52#include "dns_resolve.h"
51#include "cache_lib.h" 53#include "cache_lib.h"
52#include "netns.h" 54#include "netns.h"
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 18ab2da4eeb6..00ad1c2b217d 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -312,7 +312,7 @@ struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags)
312} 312}
313EXPORT_SYMBOL_GPL(nfs4_label_alloc); 313EXPORT_SYMBOL_GPL(nfs4_label_alloc);
314#else 314#else
315void inline nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, 315void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
316 struct nfs4_label *label) 316 struct nfs4_label *label)
317{ 317{
318} 318}
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index bca6a3e3c49c..8b5cc04a8611 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -269,6 +269,21 @@ extern const u32 nfs41_maxgetdevinfo_overhead;
269extern struct rpc_procinfo nfs4_procedures[]; 269extern struct rpc_procinfo nfs4_procedures[];
270#endif 270#endif
271 271
272#ifdef CONFIG_NFS_V4_SECURITY_LABEL
273extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags);
274static inline void nfs4_label_free(struct nfs4_label *label)
275{
276 if (label) {
277 kfree(label->label);
278 kfree(label);
279 }
280 return;
281}
282#else
283static inline struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags) { return NULL; }
284static inline void nfs4_label_free(void *label) {}
285#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
286
272/* proc.c */ 287/* proc.c */
273void nfs_close_context(struct nfs_open_context *ctx, int is_sync); 288void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
274extern struct nfs_client *nfs_init_client(struct nfs_client *clp, 289extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 3ce79b04522e..5609edc742a0 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -9,6 +9,14 @@
9#ifndef __LINUX_FS_NFS_NFS4_FS_H 9#ifndef __LINUX_FS_NFS_NFS4_FS_H
10#define __LINUX_FS_NFS_NFS4_FS_H 10#define __LINUX_FS_NFS_NFS4_FS_H
11 11
12#if defined(CONFIG_NFS_V4_2)
13#define NFS4_MAX_MINOR_VERSION 2
14#elif defined(CONFIG_NFS_V4_1)
15#define NFS4_MAX_MINOR_VERSION 1
16#else
17#define NFS4_MAX_MINOR_VERSION 0
18#endif
19
12#if IS_ENABLED(CONFIG_NFS_V4) 20#if IS_ENABLED(CONFIG_NFS_V4)
13 21
14#define NFS4_MAX_LOOP_ON_RECOVER (10) 22#define NFS4_MAX_LOOP_ON_RECOVER (10)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 659990c0109e..15052b81df42 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2518,9 +2518,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
2518 calldata->roc_barrier); 2518 calldata->roc_barrier);
2519 nfs_set_open_stateid(state, &calldata->res.stateid, 0); 2519 nfs_set_open_stateid(state, &calldata->res.stateid, 0);
2520 renew_lease(server, calldata->timestamp); 2520 renew_lease(server, calldata->timestamp);
2521 nfs4_close_clear_stateid_flags(state,
2522 calldata->arg.fmode);
2523 break; 2521 break;
2522 case -NFS4ERR_ADMIN_REVOKED:
2524 case -NFS4ERR_STALE_STATEID: 2523 case -NFS4ERR_STALE_STATEID:
2525 case -NFS4ERR_OLD_STATEID: 2524 case -NFS4ERR_OLD_STATEID:
2526 case -NFS4ERR_BAD_STATEID: 2525 case -NFS4ERR_BAD_STATEID:
@@ -2528,9 +2527,13 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
2528 if (calldata->arg.fmode == 0) 2527 if (calldata->arg.fmode == 0)
2529 break; 2528 break;
2530 default: 2529 default:
2531 if (nfs4_async_handle_error(task, server, state) == -EAGAIN) 2530 if (nfs4_async_handle_error(task, server, state) == -EAGAIN) {
2532 rpc_restart_call_prepare(task); 2531 rpc_restart_call_prepare(task);
2532 goto out_release;
2533 }
2533 } 2534 }
2535 nfs4_close_clear_stateid_flags(state, calldata->arg.fmode);
2536out_release:
2534 nfs_release_seqid(calldata->arg.seqid); 2537 nfs_release_seqid(calldata->arg.seqid);
2535 nfs_refresh_inode(calldata->inode, calldata->res.fattr); 2538 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2536 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); 2539 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
@@ -4802,7 +4805,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
4802 dprintk("%s ERROR %d, Reset session\n", __func__, 4805 dprintk("%s ERROR %d, Reset session\n", __func__,
4803 task->tk_status); 4806 task->tk_status);
4804 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); 4807 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
4805 goto restart_call; 4808 goto wait_on_recovery;
4806#endif /* CONFIG_NFS_V4_1 */ 4809#endif /* CONFIG_NFS_V4_1 */
4807 case -NFS4ERR_DELAY: 4810 case -NFS4ERR_DELAY:
4808 nfs_inc_server_stats(server, NFSIOS_DELAY); 4811 nfs_inc_server_stats(server, NFSIOS_DELAY);
@@ -4987,11 +4990,17 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
4987 4990
4988 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); 4991 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
4989 switch (task->tk_status) { 4992 switch (task->tk_status) {
4990 case -NFS4ERR_STALE_STATEID:
4991 case -NFS4ERR_EXPIRED:
4992 case 0: 4993 case 0:
4993 renew_lease(data->res.server, data->timestamp); 4994 renew_lease(data->res.server, data->timestamp);
4994 break; 4995 break;
4996 case -NFS4ERR_ADMIN_REVOKED:
4997 case -NFS4ERR_DELEG_REVOKED:
4998 case -NFS4ERR_BAD_STATEID:
4999 case -NFS4ERR_OLD_STATEID:
5000 case -NFS4ERR_STALE_STATEID:
5001 case -NFS4ERR_EXPIRED:
5002 task->tk_status = 0;
5003 break;
4995 default: 5004 default:
4996 if (nfs4_async_handle_error(task, data->res.server, NULL) == 5005 if (nfs4_async_handle_error(task, data->res.server, NULL) ==
4997 -EAGAIN) { 5006 -EAGAIN) {
@@ -7589,7 +7598,14 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
7589 return; 7598 return;
7590 7599
7591 server = NFS_SERVER(lrp->args.inode); 7600 server = NFS_SERVER(lrp->args.inode);
7592 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) { 7601 switch (task->tk_status) {
7602 default:
7603 task->tk_status = 0;
7604 case 0:
7605 break;
7606 case -NFS4ERR_DELAY:
7607 if (nfs4_async_handle_error(task, server, NULL) != -EAGAIN)
7608 break;
7593 rpc_restart_call_prepare(task); 7609 rpc_restart_call_prepare(task);
7594 return; 7610 return;
7595 } 7611 }
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 088de1355e93..ee7237f99f54 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -141,8 +141,8 @@ xdr_error: \
141 141
142static void next_decode_page(struct nfsd4_compoundargs *argp) 142static void next_decode_page(struct nfsd4_compoundargs *argp)
143{ 143{
144 argp->pagelist++;
145 argp->p = page_address(argp->pagelist[0]); 144 argp->p = page_address(argp->pagelist[0]);
145 argp->pagelist++;
146 if (argp->pagelen < PAGE_SIZE) { 146 if (argp->pagelen < PAGE_SIZE) {
147 argp->end = argp->p + (argp->pagelen>>2); 147 argp->end = argp->p + (argp->pagelen>>2);
148 argp->pagelen = 0; 148 argp->pagelen = 0;
@@ -1229,6 +1229,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
1229 len -= pages * PAGE_SIZE; 1229 len -= pages * PAGE_SIZE;
1230 1230
1231 argp->p = (__be32 *)page_address(argp->pagelist[0]); 1231 argp->p = (__be32 *)page_address(argp->pagelist[0]);
1232 argp->pagelist++;
1232 argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE); 1233 argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
1233 } 1234 }
1234 argp->p += XDR_QUADLEN(len); 1235 argp->p += XDR_QUADLEN(len);
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 9186c7ce0b14..b6af150c96b8 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -132,6 +132,13 @@ nfsd_reply_cache_alloc(void)
132} 132}
133 133
134static void 134static void
135nfsd_reply_cache_unhash(struct svc_cacherep *rp)
136{
137 hlist_del_init(&rp->c_hash);
138 list_del_init(&rp->c_lru);
139}
140
141static void
135nfsd_reply_cache_free_locked(struct svc_cacherep *rp) 142nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
136{ 143{
137 if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) { 144 if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
@@ -417,7 +424,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
417 rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru); 424 rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
418 if (nfsd_cache_entry_expired(rp) || 425 if (nfsd_cache_entry_expired(rp) ||
419 num_drc_entries >= max_drc_entries) { 426 num_drc_entries >= max_drc_entries) {
420 lru_put_end(rp); 427 nfsd_reply_cache_unhash(rp);
421 prune_cache_entries(); 428 prune_cache_entries();
422 goto search_cache; 429 goto search_cache;
423 } 430 }
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 94b5f5d2bfed..7eea63cada1d 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -298,41 +298,12 @@ commit_metadata(struct svc_fh *fhp)
298} 298}
299 299
300/* 300/*
301 * Set various file attributes. 301 * Go over the attributes and take care of the small differences between
302 * N.B. After this call fhp needs an fh_put 302 * NFS semantics and what Linux expects.
303 */ 303 */
304__be32 304static void
305nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, 305nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
306 int check_guard, time_t guardtime)
307{ 306{
308 struct dentry *dentry;
309 struct inode *inode;
310 int accmode = NFSD_MAY_SATTR;
311 umode_t ftype = 0;
312 __be32 err;
313 int host_err;
314 int size_change = 0;
315
316 if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
317 accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
318 if (iap->ia_valid & ATTR_SIZE)
319 ftype = S_IFREG;
320
321 /* Get inode */
322 err = fh_verify(rqstp, fhp, ftype, accmode);
323 if (err)
324 goto out;
325
326 dentry = fhp->fh_dentry;
327 inode = dentry->d_inode;
328
329 /* Ignore any mode updates on symlinks */
330 if (S_ISLNK(inode->i_mode))
331 iap->ia_valid &= ~ATTR_MODE;
332
333 if (!iap->ia_valid)
334 goto out;
335
336 /* 307 /*
337 * NFSv2 does not differentiate between "set-[ac]time-to-now" 308 * NFSv2 does not differentiate between "set-[ac]time-to-now"
338 * which only requires access, and "set-[ac]time-to-X" which 309 * which only requires access, and "set-[ac]time-to-X" which
@@ -342,8 +313,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
342 * convert to "set to now" instead of "set to explicit time" 313 * convert to "set to now" instead of "set to explicit time"
343 * 314 *
344 * We only call inode_change_ok as the last test as technically 315 * We only call inode_change_ok as the last test as technically
345 * it is not an interface that we should be using. It is only 316 * it is not an interface that we should be using.
346 * valid if the filesystem does not define it's own i_op->setattr.
347 */ 317 */
348#define BOTH_TIME_SET (ATTR_ATIME_SET | ATTR_MTIME_SET) 318#define BOTH_TIME_SET (ATTR_ATIME_SET | ATTR_MTIME_SET)
349#define MAX_TOUCH_TIME_ERROR (30*60) 319#define MAX_TOUCH_TIME_ERROR (30*60)
@@ -369,30 +339,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
369 iap->ia_valid &= ~BOTH_TIME_SET; 339 iap->ia_valid &= ~BOTH_TIME_SET;
370 } 340 }
371 } 341 }
372
373 /*
374 * The size case is special.
375 * It changes the file as well as the attributes.
376 */
377 if (iap->ia_valid & ATTR_SIZE) {
378 if (iap->ia_size < inode->i_size) {
379 err = nfsd_permission(rqstp, fhp->fh_export, dentry,
380 NFSD_MAY_TRUNC|NFSD_MAY_OWNER_OVERRIDE);
381 if (err)
382 goto out;
383 }
384
385 host_err = get_write_access(inode);
386 if (host_err)
387 goto out_nfserr;
388
389 size_change = 1;
390 host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
391 if (host_err) {
392 put_write_access(inode);
393 goto out_nfserr;
394 }
395 }
396 342
397 /* sanitize the mode change */ 343 /* sanitize the mode change */
398 if (iap->ia_valid & ATTR_MODE) { 344 if (iap->ia_valid & ATTR_MODE) {
@@ -415,32 +361,111 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
415 iap->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID); 361 iap->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID);
416 } 362 }
417 } 363 }
364}
418 365
419 /* Change the attributes. */ 366static __be32
367nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
368 struct iattr *iap)
369{
370 struct inode *inode = fhp->fh_dentry->d_inode;
371 int host_err;
420 372
421 iap->ia_valid |= ATTR_CTIME; 373 if (iap->ia_size < inode->i_size) {
374 __be32 err;
422 375
423 err = nfserr_notsync; 376 err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
424 if (!check_guard || guardtime == inode->i_ctime.tv_sec) { 377 NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
425 host_err = nfsd_break_lease(inode); 378 if (err)
426 if (host_err) 379 return err;
427 goto out_nfserr; 380 }
428 fh_lock(fhp);
429 381
430 host_err = notify_change(dentry, iap, NULL); 382 host_err = get_write_access(inode);
431 err = nfserrno(host_err); 383 if (host_err)
432 fh_unlock(fhp); 384 goto out_nfserrno;
385
386 host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
387 if (host_err)
388 goto out_put_write_access;
389 return 0;
390
391out_put_write_access:
392 put_write_access(inode);
393out_nfserrno:
394 return nfserrno(host_err);
395}
396
397/*
398 * Set various file attributes. After this call fhp needs an fh_put.
399 */
400__be32
401nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
402 int check_guard, time_t guardtime)
403{
404 struct dentry *dentry;
405 struct inode *inode;
406 int accmode = NFSD_MAY_SATTR;
407 umode_t ftype = 0;
408 __be32 err;
409 int host_err;
410 int size_change = 0;
411
412 if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
413 accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
414 if (iap->ia_valid & ATTR_SIZE)
415 ftype = S_IFREG;
416
417 /* Get inode */
418 err = fh_verify(rqstp, fhp, ftype, accmode);
419 if (err)
420 goto out;
421
422 dentry = fhp->fh_dentry;
423 inode = dentry->d_inode;
424
425 /* Ignore any mode updates on symlinks */
426 if (S_ISLNK(inode->i_mode))
427 iap->ia_valid &= ~ATTR_MODE;
428
429 if (!iap->ia_valid)
430 goto out;
431
432 nfsd_sanitize_attrs(inode, iap);
433
434 /*
435 * The size case is special, it changes the file in addition to the
436 * attributes.
437 */
438 if (iap->ia_valid & ATTR_SIZE) {
439 err = nfsd_get_write_access(rqstp, fhp, iap);
440 if (err)
441 goto out;
442 size_change = 1;
433 } 443 }
444
445 iap->ia_valid |= ATTR_CTIME;
446
447 if (check_guard && guardtime != inode->i_ctime.tv_sec) {
448 err = nfserr_notsync;
449 goto out_put_write_access;
450 }
451
452 host_err = nfsd_break_lease(inode);
453 if (host_err)
454 goto out_put_write_access_nfserror;
455
456 fh_lock(fhp);
457 host_err = notify_change(dentry, iap, NULL);
458 fh_unlock(fhp);
459
460out_put_write_access_nfserror:
461 err = nfserrno(host_err);
462out_put_write_access:
434 if (size_change) 463 if (size_change)
435 put_write_access(inode); 464 put_write_access(inode);
436 if (!err) 465 if (!err)
437 commit_metadata(fhp); 466 commit_metadata(fhp);
438out: 467out:
439 return err; 468 return err;
440
441out_nfserr:
442 err = nfserrno(host_err);
443 goto out;
444} 469}
445 470
446#if defined(CONFIG_NFSD_V2_ACL) || \ 471#if defined(CONFIG_NFSD_V2_ACL) || \
diff --git a/fs/pipe.c b/fs/pipe.c
index d2c45e14e6d8..0e0752ef2715 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -726,11 +726,25 @@ pipe_poll(struct file *filp, poll_table *wait)
726 return mask; 726 return mask;
727} 727}
728 728
729static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
730{
731 int kill = 0;
732
733 spin_lock(&inode->i_lock);
734 if (!--pipe->files) {
735 inode->i_pipe = NULL;
736 kill = 1;
737 }
738 spin_unlock(&inode->i_lock);
739
740 if (kill)
741 free_pipe_info(pipe);
742}
743
729static int 744static int
730pipe_release(struct inode *inode, struct file *file) 745pipe_release(struct inode *inode, struct file *file)
731{ 746{
732 struct pipe_inode_info *pipe = inode->i_pipe; 747 struct pipe_inode_info *pipe = file->private_data;
733 int kill = 0;
734 748
735 __pipe_lock(pipe); 749 __pipe_lock(pipe);
736 if (file->f_mode & FMODE_READ) 750 if (file->f_mode & FMODE_READ)
@@ -743,17 +757,9 @@ pipe_release(struct inode *inode, struct file *file)
743 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); 757 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
744 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); 758 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
745 } 759 }
746 spin_lock(&inode->i_lock);
747 if (!--pipe->files) {
748 inode->i_pipe = NULL;
749 kill = 1;
750 }
751 spin_unlock(&inode->i_lock);
752 __pipe_unlock(pipe); 760 __pipe_unlock(pipe);
753 761
754 if (kill) 762 put_pipe_info(inode, pipe);
755 free_pipe_info(pipe);
756
757 return 0; 763 return 0;
758} 764}
759 765
@@ -1014,7 +1020,6 @@ static int fifo_open(struct inode *inode, struct file *filp)
1014{ 1020{
1015 struct pipe_inode_info *pipe; 1021 struct pipe_inode_info *pipe;
1016 bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC; 1022 bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
1017 int kill = 0;
1018 int ret; 1023 int ret;
1019 1024
1020 filp->f_version = 0; 1025 filp->f_version = 0;
@@ -1130,15 +1135,9 @@ err_wr:
1130 goto err; 1135 goto err;
1131 1136
1132err: 1137err:
1133 spin_lock(&inode->i_lock);
1134 if (!--pipe->files) {
1135 inode->i_pipe = NULL;
1136 kill = 1;
1137 }
1138 spin_unlock(&inode->i_lock);
1139 __pipe_unlock(pipe); 1138 __pipe_unlock(pipe);
1140 if (kill) 1139
1141 free_pipe_info(pipe); 1140 put_pipe_info(inode, pipe);
1142 return ret; 1141 return ret;
1143} 1142}
1144 1143
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 1485e38daaa3..03c8d747be48 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1151,10 +1151,16 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
1151 goto out_free_page; 1151 goto out_free_page;
1152 1152
1153 } 1153 }
1154 kloginuid = make_kuid(file->f_cred->user_ns, loginuid); 1154
1155 if (!uid_valid(kloginuid)) { 1155 /* is userspace tring to explicitly UNSET the loginuid? */
1156 length = -EINVAL; 1156 if (loginuid == AUDIT_UID_UNSET) {
1157 goto out_free_page; 1157 kloginuid = INVALID_UID;
1158 } else {
1159 kloginuid = make_kuid(file->f_cred->user_ns, loginuid);
1160 if (!uid_valid(kloginuid)) {
1161 length = -EINVAL;
1162 goto out_free_page;
1163 }
1158 } 1164 }
1159 1165
1160 length = audit_set_loginuid(kloginuid); 1166 length = audit_set_loginuid(kloginuid);
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 737e15615b04..cca93b6fb9a9 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -175,22 +175,6 @@ static const struct inode_operations proc_link_inode_operations = {
175}; 175};
176 176
177/* 177/*
178 * As some entries in /proc are volatile, we want to
179 * get rid of unused dentries. This could be made
180 * smarter: we could keep a "volatile" flag in the
181 * inode to indicate which ones to keep.
182 */
183static int proc_delete_dentry(const struct dentry * dentry)
184{
185 return 1;
186}
187
188static const struct dentry_operations proc_dentry_operations =
189{
190 .d_delete = proc_delete_dentry,
191};
192
193/*
194 * Don't create negative dentries here, return -ENOENT by hand 178 * Don't create negative dentries here, return -ENOENT by hand
195 * instead. 179 * instead.
196 */ 180 */
@@ -209,7 +193,7 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
209 inode = proc_get_inode(dir->i_sb, de); 193 inode = proc_get_inode(dir->i_sb, de);
210 if (!inode) 194 if (!inode)
211 return ERR_PTR(-ENOMEM); 195 return ERR_PTR(-ENOMEM);
212 d_set_d_op(dentry, &proc_dentry_operations); 196 d_set_d_op(dentry, &simple_dentry_operations);
213 d_add(dentry, inode); 197 d_add(dentry, inode);
214 return NULL; 198 return NULL;
215 } 199 }
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 28955d4b7218..124fc43c7090 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -292,16 +292,20 @@ proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
292{ 292{
293 struct proc_dir_entry *pde = PDE(file_inode(file)); 293 struct proc_dir_entry *pde = PDE(file_inode(file));
294 unsigned long rv = -EIO; 294 unsigned long rv = -EIO;
295 unsigned long (*get_area)(struct file *, unsigned long, unsigned long, 295
296 unsigned long, unsigned long) = NULL;
297 if (use_pde(pde)) { 296 if (use_pde(pde)) {
297 typeof(proc_reg_get_unmapped_area) *get_area;
298
299 get_area = pde->proc_fops->get_unmapped_area;
298#ifdef CONFIG_MMU 300#ifdef CONFIG_MMU
299 get_area = current->mm->get_unmapped_area; 301 if (!get_area)
302 get_area = current->mm->get_unmapped_area;
300#endif 303#endif
301 if (pde->proc_fops->get_unmapped_area) 304
302 get_area = pde->proc_fops->get_unmapped_area;
303 if (get_area) 305 if (get_area)
304 rv = get_area(file, orig_addr, len, pgoff, flags); 306 rv = get_area(file, orig_addr, len, pgoff, flags);
307 else
308 rv = orig_addr;
305 unuse_pde(pde); 309 unuse_pde(pde);
306 } 310 }
307 return rv; 311 return rv;
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index 49a7fff2e83a..9ae46b87470d 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -42,12 +42,6 @@ static const struct inode_operations ns_inode_operations = {
42 .setattr = proc_setattr, 42 .setattr = proc_setattr,
43}; 43};
44 44
45static int ns_delete_dentry(const struct dentry *dentry)
46{
47 /* Don't cache namespace inodes when not in use */
48 return 1;
49}
50
51static char *ns_dname(struct dentry *dentry, char *buffer, int buflen) 45static char *ns_dname(struct dentry *dentry, char *buffer, int buflen)
52{ 46{
53 struct inode *inode = dentry->d_inode; 47 struct inode *inode = dentry->d_inode;
@@ -59,7 +53,7 @@ static char *ns_dname(struct dentry *dentry, char *buffer, int buflen)
59 53
60const struct dentry_operations ns_dentry_operations = 54const struct dentry_operations ns_dentry_operations =
61{ 55{
62 .d_delete = ns_delete_dentry, 56 .d_delete = always_delete_dentry,
63 .d_dname = ns_dname, 57 .d_dname = ns_dname,
64}; 58};
65 59
diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
index 16e8abb7709b..72d29177998e 100644
--- a/fs/quota/netlink.c
+++ b/fs/quota/netlink.c
@@ -9,13 +9,25 @@
9#include <net/netlink.h> 9#include <net/netlink.h>
10#include <net/genetlink.h> 10#include <net/genetlink.h>
11 11
12static const struct genl_multicast_group quota_mcgrps[] = {
13 { .name = "events", },
14};
15
12/* Netlink family structure for quota */ 16/* Netlink family structure for quota */
13static struct genl_family quota_genl_family = { 17static struct genl_family quota_genl_family = {
14 .id = GENL_ID_GENERATE, 18 /*
19 * Needed due to multicast group ID abuse - old code assumed
20 * the family ID was also a valid multicast group ID (which
21 * isn't true) and userspace might thus rely on it. Assign a
22 * static ID for this group to make dealing with that easier.
23 */
24 .id = GENL_ID_VFS_DQUOT,
15 .hdrsize = 0, 25 .hdrsize = 0,
16 .name = "VFS_DQUOT", 26 .name = "VFS_DQUOT",
17 .version = 1, 27 .version = 1,
18 .maxattr = QUOTA_NL_A_MAX, 28 .maxattr = QUOTA_NL_A_MAX,
29 .mcgrps = quota_mcgrps,
30 .n_mcgrps = ARRAY_SIZE(quota_mcgrps),
19}; 31};
20 32
21/** 33/**
@@ -78,7 +90,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
78 goto attr_err_out; 90 goto attr_err_out;
79 genlmsg_end(skb, msg_head); 91 genlmsg_end(skb, msg_head);
80 92
81 genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS); 93 genlmsg_multicast(&quota_genl_family, skb, 0, 0, GFP_NOFS);
82 return; 94 return;
83attr_err_out: 95attr_err_out:
84 printk(KERN_ERR "VFS: Not enough space to compose quota message!\n"); 96 printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
index c70111ebefd4..b6fa8657dcbc 100644
--- a/fs/squashfs/Kconfig
+++ b/fs/squashfs/Kconfig
@@ -25,6 +25,78 @@ config SQUASHFS
25 25
26 If unsure, say N. 26 If unsure, say N.
27 27
28choice
29 prompt "File decompression options"
30 depends on SQUASHFS
31 help
32 Squashfs now supports two options for decompressing file
33 data. Traditionally Squashfs has decompressed into an
34 intermediate buffer and then memcopied it into the page cache.
35 Squashfs now supports the ability to decompress directly into
36 the page cache.
37
38 If unsure, select "Decompress file data into an intermediate buffer"
39
40config SQUASHFS_FILE_CACHE
41 bool "Decompress file data into an intermediate buffer"
42 help
43 Decompress file data into an intermediate buffer and then
44 memcopy it into the page cache.
45
46config SQUASHFS_FILE_DIRECT
47 bool "Decompress files directly into the page cache"
48 help
49 Directly decompress file data into the page cache.
50 Doing so can significantly improve performance because
51 it eliminates a memcpy and it also removes the lock contention
52 on the single buffer.
53
54endchoice
55
56choice
57 prompt "Decompressor parallelisation options"
58 depends on SQUASHFS
59 help
60 Squashfs now supports three parallelisation options for
61 decompression. Each one exhibits various trade-offs between
62 decompression performance and CPU and memory usage.
63
64 If in doubt, select "Single threaded compression"
65
66config SQUASHFS_DECOMP_SINGLE
67 bool "Single threaded compression"
68 help
69 Traditionally Squashfs has used single-threaded decompression.
70 Only one block (data or metadata) can be decompressed at any
71 one time. This limits CPU and memory usage to a minimum.
72
73config SQUASHFS_DECOMP_MULTI
74 bool "Use multiple decompressors for parallel I/O"
75 help
76 By default Squashfs uses a single decompressor but it gives
77 poor performance on parallel I/O workloads when using multiple CPU
78 machines due to waiting on decompressor availability.
79
80 If you have a parallel I/O workload and your system has enough memory,
81 using this option may improve overall I/O performance.
82
83 This decompressor implementation uses up to two parallel
84 decompressors per core. It dynamically allocates decompressors
85 on a demand basis.
86
87config SQUASHFS_DECOMP_MULTI_PERCPU
88 bool "Use percpu multiple decompressors for parallel I/O"
89 help
90 By default Squashfs uses a single decompressor but it gives
91 poor performance on parallel I/O workloads when using multiple CPU
92 machines due to waiting on decompressor availability.
93
94 This decompressor implementation uses a maximum of one
95 decompressor per core. It uses percpu variables to ensure
96 decompression is load-balanced across the cores.
97
98endchoice
99
28config SQUASHFS_XATTR 100config SQUASHFS_XATTR
29 bool "Squashfs XATTR support" 101 bool "Squashfs XATTR support"
30 depends on SQUASHFS 102 depends on SQUASHFS
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile
index 110b0476f3b4..4132520b4ff2 100644
--- a/fs/squashfs/Makefile
+++ b/fs/squashfs/Makefile
@@ -5,6 +5,11 @@
5obj-$(CONFIG_SQUASHFS) += squashfs.o 5obj-$(CONFIG_SQUASHFS) += squashfs.o
6squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o 6squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
7squashfs-y += namei.o super.o symlink.o decompressor.o 7squashfs-y += namei.o super.o symlink.o decompressor.o
8squashfs-$(CONFIG_SQUASHFS_FILE_CACHE) += file_cache.o
9squashfs-$(CONFIG_SQUASHFS_FILE_DIRECT) += file_direct.o page_actor.o
10squashfs-$(CONFIG_SQUASHFS_DECOMP_SINGLE) += decompressor_single.o
11squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI) += decompressor_multi.o
12squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU) += decompressor_multi_percpu.o
8squashfs-$(CONFIG_SQUASHFS_XATTR) += xattr.o xattr_id.o 13squashfs-$(CONFIG_SQUASHFS_XATTR) += xattr.o xattr_id.o
9squashfs-$(CONFIG_SQUASHFS_LZO) += lzo_wrapper.o 14squashfs-$(CONFIG_SQUASHFS_LZO) += lzo_wrapper.o
10squashfs-$(CONFIG_SQUASHFS_XZ) += xz_wrapper.o 15squashfs-$(CONFIG_SQUASHFS_XZ) += xz_wrapper.o
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 41d108ecc9be..0cea9b9236d0 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -36,6 +36,7 @@
36#include "squashfs_fs_sb.h" 36#include "squashfs_fs_sb.h"
37#include "squashfs.h" 37#include "squashfs.h"
38#include "decompressor.h" 38#include "decompressor.h"
39#include "page_actor.h"
39 40
40/* 41/*
41 * Read the metadata block length, this is stored in the first two 42 * Read the metadata block length, this is stored in the first two
@@ -86,16 +87,16 @@ static struct buffer_head *get_block_length(struct super_block *sb,
86 * generated a larger block - this does occasionally happen with compression 87 * generated a larger block - this does occasionally happen with compression
87 * algorithms). 88 * algorithms).
88 */ 89 */
89int squashfs_read_data(struct super_block *sb, void **buffer, u64 index, 90int squashfs_read_data(struct super_block *sb, u64 index, int length,
90 int length, u64 *next_index, int srclength, int pages) 91 u64 *next_index, struct squashfs_page_actor *output)
91{ 92{
92 struct squashfs_sb_info *msblk = sb->s_fs_info; 93 struct squashfs_sb_info *msblk = sb->s_fs_info;
93 struct buffer_head **bh; 94 struct buffer_head **bh;
94 int offset = index & ((1 << msblk->devblksize_log2) - 1); 95 int offset = index & ((1 << msblk->devblksize_log2) - 1);
95 u64 cur_index = index >> msblk->devblksize_log2; 96 u64 cur_index = index >> msblk->devblksize_log2;
96 int bytes, compressed, b = 0, k = 0, page = 0, avail; 97 int bytes, compressed, b = 0, k = 0, avail, i;
97 98
98 bh = kcalloc(((srclength + msblk->devblksize - 1) 99 bh = kcalloc(((output->length + msblk->devblksize - 1)
99 >> msblk->devblksize_log2) + 1, sizeof(*bh), GFP_KERNEL); 100 >> msblk->devblksize_log2) + 1, sizeof(*bh), GFP_KERNEL);
100 if (bh == NULL) 101 if (bh == NULL)
101 return -ENOMEM; 102 return -ENOMEM;
@@ -111,9 +112,9 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
111 *next_index = index + length; 112 *next_index = index + length;
112 113
113 TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n", 114 TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n",
114 index, compressed ? "" : "un", length, srclength); 115 index, compressed ? "" : "un", length, output->length);
115 116
116 if (length < 0 || length > srclength || 117 if (length < 0 || length > output->length ||
117 (index + length) > msblk->bytes_used) 118 (index + length) > msblk->bytes_used)
118 goto read_failure; 119 goto read_failure;
119 120
@@ -145,7 +146,7 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
145 TRACE("Block @ 0x%llx, %scompressed size %d\n", index, 146 TRACE("Block @ 0x%llx, %scompressed size %d\n", index,
146 compressed ? "" : "un", length); 147 compressed ? "" : "un", length);
147 148
148 if (length < 0 || length > srclength || 149 if (length < 0 || length > output->length ||
149 (index + length) > msblk->bytes_used) 150 (index + length) > msblk->bytes_used)
150 goto block_release; 151 goto block_release;
151 152
@@ -158,9 +159,15 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
158 ll_rw_block(READ, b - 1, bh + 1); 159 ll_rw_block(READ, b - 1, bh + 1);
159 } 160 }
160 161
162 for (i = 0; i < b; i++) {
163 wait_on_buffer(bh[i]);
164 if (!buffer_uptodate(bh[i]))
165 goto block_release;
166 }
167
161 if (compressed) { 168 if (compressed) {
162 length = squashfs_decompress(msblk, buffer, bh, b, offset, 169 length = squashfs_decompress(msblk, bh, b, offset, length,
163 length, srclength, pages); 170 output);
164 if (length < 0) 171 if (length < 0)
165 goto read_failure; 172 goto read_failure;
166 } else { 173 } else {
@@ -168,22 +175,20 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
168 * Block is uncompressed. 175 * Block is uncompressed.
169 */ 176 */
170 int in, pg_offset = 0; 177 int in, pg_offset = 0;
178 void *data = squashfs_first_page(output);
171 179
172 for (bytes = length; k < b; k++) { 180 for (bytes = length; k < b; k++) {
173 in = min(bytes, msblk->devblksize - offset); 181 in = min(bytes, msblk->devblksize - offset);
174 bytes -= in; 182 bytes -= in;
175 wait_on_buffer(bh[k]);
176 if (!buffer_uptodate(bh[k]))
177 goto block_release;
178 while (in) { 183 while (in) {
179 if (pg_offset == PAGE_CACHE_SIZE) { 184 if (pg_offset == PAGE_CACHE_SIZE) {
180 page++; 185 data = squashfs_next_page(output);
181 pg_offset = 0; 186 pg_offset = 0;
182 } 187 }
183 avail = min_t(int, in, PAGE_CACHE_SIZE - 188 avail = min_t(int, in, PAGE_CACHE_SIZE -
184 pg_offset); 189 pg_offset);
185 memcpy(buffer[page] + pg_offset, 190 memcpy(data + pg_offset, bh[k]->b_data + offset,
186 bh[k]->b_data + offset, avail); 191 avail);
187 in -= avail; 192 in -= avail;
188 pg_offset += avail; 193 pg_offset += avail;
189 offset += avail; 194 offset += avail;
@@ -191,6 +196,7 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
191 offset = 0; 196 offset = 0;
192 put_bh(bh[k]); 197 put_bh(bh[k]);
193 } 198 }
199 squashfs_finish_page(output);
194 } 200 }
195 201
196 kfree(bh); 202 kfree(bh);
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index af0b73802592..1cb70a0b2168 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -56,6 +56,7 @@
56#include "squashfs_fs.h" 56#include "squashfs_fs.h"
57#include "squashfs_fs_sb.h" 57#include "squashfs_fs_sb.h"
58#include "squashfs.h" 58#include "squashfs.h"
59#include "page_actor.h"
59 60
60/* 61/*
61 * Look-up block in cache, and increment usage count. If not in cache, read 62 * Look-up block in cache, and increment usage count. If not in cache, read
@@ -119,9 +120,8 @@ struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
119 entry->error = 0; 120 entry->error = 0;
120 spin_unlock(&cache->lock); 121 spin_unlock(&cache->lock);
121 122
122 entry->length = squashfs_read_data(sb, entry->data, 123 entry->length = squashfs_read_data(sb, block, length,
123 block, length, &entry->next_index, 124 &entry->next_index, entry->actor);
124 cache->block_size, cache->pages);
125 125
126 spin_lock(&cache->lock); 126 spin_lock(&cache->lock);
127 127
@@ -220,6 +220,7 @@ void squashfs_cache_delete(struct squashfs_cache *cache)
220 kfree(cache->entry[i].data[j]); 220 kfree(cache->entry[i].data[j]);
221 kfree(cache->entry[i].data); 221 kfree(cache->entry[i].data);
222 } 222 }
223 kfree(cache->entry[i].actor);
223 } 224 }
224 225
225 kfree(cache->entry); 226 kfree(cache->entry);
@@ -280,6 +281,13 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
280 goto cleanup; 281 goto cleanup;
281 } 282 }
282 } 283 }
284
285 entry->actor = squashfs_page_actor_init(entry->data,
286 cache->pages, 0);
287 if (entry->actor == NULL) {
288 ERROR("Failed to allocate %s cache entry\n", name);
289 goto cleanup;
290 }
283 } 291 }
284 292
285 return cache; 293 return cache;
@@ -410,6 +418,7 @@ void *squashfs_read_table(struct super_block *sb, u64 block, int length)
410 int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 418 int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
411 int i, res; 419 int i, res;
412 void *table, *buffer, **data; 420 void *table, *buffer, **data;
421 struct squashfs_page_actor *actor;
413 422
414 table = buffer = kmalloc(length, GFP_KERNEL); 423 table = buffer = kmalloc(length, GFP_KERNEL);
415 if (table == NULL) 424 if (table == NULL)
@@ -421,19 +430,28 @@ void *squashfs_read_table(struct super_block *sb, u64 block, int length)
421 goto failed; 430 goto failed;
422 } 431 }
423 432
433 actor = squashfs_page_actor_init(data, pages, length);
434 if (actor == NULL) {
435 res = -ENOMEM;
436 goto failed2;
437 }
438
424 for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE) 439 for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
425 data[i] = buffer; 440 data[i] = buffer;
426 441
427 res = squashfs_read_data(sb, data, block, length | 442 res = squashfs_read_data(sb, block, length |
428 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages); 443 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, actor);
429 444
430 kfree(data); 445 kfree(data);
446 kfree(actor);
431 447
432 if (res < 0) 448 if (res < 0)
433 goto failed; 449 goto failed;
434 450
435 return table; 451 return table;
436 452
453failed2:
454 kfree(data);
437failed: 455failed:
438 kfree(table); 456 kfree(table);
439 return ERR_PTR(res); 457 return ERR_PTR(res);
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
index 3f6271d86abc..ac22fe73b0ad 100644
--- a/fs/squashfs/decompressor.c
+++ b/fs/squashfs/decompressor.c
@@ -30,6 +30,7 @@
30#include "squashfs_fs_sb.h" 30#include "squashfs_fs_sb.h"
31#include "decompressor.h" 31#include "decompressor.h"
32#include "squashfs.h" 32#include "squashfs.h"
33#include "page_actor.h"
33 34
34/* 35/*
35 * This file (and decompressor.h) implements a decompressor framework for 36 * This file (and decompressor.h) implements a decompressor framework for
@@ -37,29 +38,29 @@
37 */ 38 */
38 39
39static const struct squashfs_decompressor squashfs_lzma_unsupported_comp_ops = { 40static const struct squashfs_decompressor squashfs_lzma_unsupported_comp_ops = {
40 NULL, NULL, NULL, LZMA_COMPRESSION, "lzma", 0 41 NULL, NULL, NULL, NULL, LZMA_COMPRESSION, "lzma", 0
41}; 42};
42 43
43#ifndef CONFIG_SQUASHFS_LZO 44#ifndef CONFIG_SQUASHFS_LZO
44static const struct squashfs_decompressor squashfs_lzo_comp_ops = { 45static const struct squashfs_decompressor squashfs_lzo_comp_ops = {
45 NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0 46 NULL, NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0
46}; 47};
47#endif 48#endif
48 49
49#ifndef CONFIG_SQUASHFS_XZ 50#ifndef CONFIG_SQUASHFS_XZ
50static const struct squashfs_decompressor squashfs_xz_comp_ops = { 51static const struct squashfs_decompressor squashfs_xz_comp_ops = {
51 NULL, NULL, NULL, XZ_COMPRESSION, "xz", 0 52 NULL, NULL, NULL, NULL, XZ_COMPRESSION, "xz", 0
52}; 53};
53#endif 54#endif
54 55
55#ifndef CONFIG_SQUASHFS_ZLIB 56#ifndef CONFIG_SQUASHFS_ZLIB
56static const struct squashfs_decompressor squashfs_zlib_comp_ops = { 57static const struct squashfs_decompressor squashfs_zlib_comp_ops = {
57 NULL, NULL, NULL, ZLIB_COMPRESSION, "zlib", 0 58 NULL, NULL, NULL, NULL, ZLIB_COMPRESSION, "zlib", 0
58}; 59};
59#endif 60#endif
60 61
61static const struct squashfs_decompressor squashfs_unknown_comp_ops = { 62static const struct squashfs_decompressor squashfs_unknown_comp_ops = {
62 NULL, NULL, NULL, 0, "unknown", 0 63 NULL, NULL, NULL, NULL, 0, "unknown", 0
63}; 64};
64 65
65static const struct squashfs_decompressor *decompressor[] = { 66static const struct squashfs_decompressor *decompressor[] = {
@@ -83,10 +84,11 @@ const struct squashfs_decompressor *squashfs_lookup_decompressor(int id)
83} 84}
84 85
85 86
86void *squashfs_decompressor_init(struct super_block *sb, unsigned short flags) 87static void *get_comp_opts(struct super_block *sb, unsigned short flags)
87{ 88{
88 struct squashfs_sb_info *msblk = sb->s_fs_info; 89 struct squashfs_sb_info *msblk = sb->s_fs_info;
89 void *strm, *buffer = NULL; 90 void *buffer = NULL, *comp_opts;
91 struct squashfs_page_actor *actor = NULL;
90 int length = 0; 92 int length = 0;
91 93
92 /* 94 /*
@@ -94,23 +96,46 @@ void *squashfs_decompressor_init(struct super_block *sb, unsigned short flags)
94 */ 96 */
95 if (SQUASHFS_COMP_OPTS(flags)) { 97 if (SQUASHFS_COMP_OPTS(flags)) {
96 buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); 98 buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
97 if (buffer == NULL) 99 if (buffer == NULL) {
98 return ERR_PTR(-ENOMEM); 100 comp_opts = ERR_PTR(-ENOMEM);
101 goto out;
102 }
103
104 actor = squashfs_page_actor_init(&buffer, 1, 0);
105 if (actor == NULL) {
106 comp_opts = ERR_PTR(-ENOMEM);
107 goto out;
108 }
99 109
100 length = squashfs_read_data(sb, &buffer, 110 length = squashfs_read_data(sb,
101 sizeof(struct squashfs_super_block), 0, NULL, 111 sizeof(struct squashfs_super_block), 0, NULL, actor);
102 PAGE_CACHE_SIZE, 1);
103 112
104 if (length < 0) { 113 if (length < 0) {
105 strm = ERR_PTR(length); 114 comp_opts = ERR_PTR(length);
106 goto finished; 115 goto out;
107 } 116 }
108 } 117 }
109 118
110 strm = msblk->decompressor->init(msblk, buffer, length); 119 comp_opts = squashfs_comp_opts(msblk, buffer, length);
111 120
112finished: 121out:
122 kfree(actor);
113 kfree(buffer); 123 kfree(buffer);
124 return comp_opts;
125}
126
127
128void *squashfs_decompressor_setup(struct super_block *sb, unsigned short flags)
129{
130 struct squashfs_sb_info *msblk = sb->s_fs_info;
131 void *stream, *comp_opts = get_comp_opts(sb, flags);
132
133 if (IS_ERR(comp_opts))
134 return comp_opts;
135
136 stream = squashfs_decompressor_create(msblk, comp_opts);
137 if (IS_ERR(stream))
138 kfree(comp_opts);
114 139
115 return strm; 140 return stream;
116} 141}
diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h
index 330073e29029..af0985321808 100644
--- a/fs/squashfs/decompressor.h
+++ b/fs/squashfs/decompressor.h
@@ -24,28 +24,22 @@
24 */ 24 */
25 25
26struct squashfs_decompressor { 26struct squashfs_decompressor {
27 void *(*init)(struct squashfs_sb_info *, void *, int); 27 void *(*init)(struct squashfs_sb_info *, void *);
28 void *(*comp_opts)(struct squashfs_sb_info *, void *, int);
28 void (*free)(void *); 29 void (*free)(void *);
29 int (*decompress)(struct squashfs_sb_info *, void **, 30 int (*decompress)(struct squashfs_sb_info *, void *,
30 struct buffer_head **, int, int, int, int, int); 31 struct buffer_head **, int, int, int,
32 struct squashfs_page_actor *);
31 int id; 33 int id;
32 char *name; 34 char *name;
33 int supported; 35 int supported;
34}; 36};
35 37
36static inline void squashfs_decompressor_free(struct squashfs_sb_info *msblk, 38static inline void *squashfs_comp_opts(struct squashfs_sb_info *msblk,
37 void *s) 39 void *buff, int length)
38{ 40{
39 if (msblk->decompressor) 41 return msblk->decompressor->comp_opts ?
40 msblk->decompressor->free(s); 42 msblk->decompressor->comp_opts(msblk, buff, length) : NULL;
41}
42
43static inline int squashfs_decompress(struct squashfs_sb_info *msblk,
44 void **buffer, struct buffer_head **bh, int b, int offset, int length,
45 int srclength, int pages)
46{
47 return msblk->decompressor->decompress(msblk, buffer, bh, b, offset,
48 length, srclength, pages);
49} 43}
50 44
51#ifdef CONFIG_SQUASHFS_XZ 45#ifdef CONFIG_SQUASHFS_XZ
diff --git a/fs/squashfs/decompressor_multi.c b/fs/squashfs/decompressor_multi.c
new file mode 100644
index 000000000000..d6008a636479
--- /dev/null
+++ b/fs/squashfs/decompressor_multi.c
@@ -0,0 +1,198 @@
1/*
2 * Copyright (c) 2013
3 * Minchan Kim <minchan@kernel.org>
4 *
5 * This work is licensed under the terms of the GNU GPL, version 2. See
6 * the COPYING file in the top-level directory.
7 */
8#include <linux/types.h>
9#include <linux/mutex.h>
10#include <linux/slab.h>
11#include <linux/buffer_head.h>
12#include <linux/sched.h>
13#include <linux/wait.h>
14#include <linux/cpumask.h>
15
16#include "squashfs_fs.h"
17#include "squashfs_fs_sb.h"
18#include "decompressor.h"
19#include "squashfs.h"
20
21/*
22 * This file implements multi-threaded decompression in the
23 * decompressor framework
24 */
25
26
27/*
28 * The reason that multiply two is that a CPU can request new I/O
29 * while it is waiting previous request.
30 */
31#define MAX_DECOMPRESSOR (num_online_cpus() * 2)
32
33
34int squashfs_max_decompressors(void)
35{
36 return MAX_DECOMPRESSOR;
37}
38
39
40struct squashfs_stream {
41 void *comp_opts;
42 struct list_head strm_list;
43 struct mutex mutex;
44 int avail_decomp;
45 wait_queue_head_t wait;
46};
47
48
49struct decomp_stream {
50 void *stream;
51 struct list_head list;
52};
53
54
55static void put_decomp_stream(struct decomp_stream *decomp_strm,
56 struct squashfs_stream *stream)
57{
58 mutex_lock(&stream->mutex);
59 list_add(&decomp_strm->list, &stream->strm_list);
60 mutex_unlock(&stream->mutex);
61 wake_up(&stream->wait);
62}
63
64void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
65 void *comp_opts)
66{
67 struct squashfs_stream *stream;
68 struct decomp_stream *decomp_strm = NULL;
69 int err = -ENOMEM;
70
71 stream = kzalloc(sizeof(*stream), GFP_KERNEL);
72 if (!stream)
73 goto out;
74
75 stream->comp_opts = comp_opts;
76 mutex_init(&stream->mutex);
77 INIT_LIST_HEAD(&stream->strm_list);
78 init_waitqueue_head(&stream->wait);
79
80 /*
81 * We should have a decompressor at least as default
82 * so if we fail to allocate new decompressor dynamically,
83 * we could always fall back to default decompressor and
84 * file system works.
85 */
86 decomp_strm = kmalloc(sizeof(*decomp_strm), GFP_KERNEL);
87 if (!decomp_strm)
88 goto out;
89
90 decomp_strm->stream = msblk->decompressor->init(msblk,
91 stream->comp_opts);
92 if (IS_ERR(decomp_strm->stream)) {
93 err = PTR_ERR(decomp_strm->stream);
94 goto out;
95 }
96
97 list_add(&decomp_strm->list, &stream->strm_list);
98 stream->avail_decomp = 1;
99 return stream;
100
101out:
102 kfree(decomp_strm);
103 kfree(stream);
104 return ERR_PTR(err);
105}
106
107
108void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
109{
110 struct squashfs_stream *stream = msblk->stream;
111 if (stream) {
112 struct decomp_stream *decomp_strm;
113
114 while (!list_empty(&stream->strm_list)) {
115 decomp_strm = list_entry(stream->strm_list.prev,
116 struct decomp_stream, list);
117 list_del(&decomp_strm->list);
118 msblk->decompressor->free(decomp_strm->stream);
119 kfree(decomp_strm);
120 stream->avail_decomp--;
121 }
122 WARN_ON(stream->avail_decomp);
123 kfree(stream->comp_opts);
124 kfree(stream);
125 }
126}
127
128
129static struct decomp_stream *get_decomp_stream(struct squashfs_sb_info *msblk,
130 struct squashfs_stream *stream)
131{
132 struct decomp_stream *decomp_strm;
133
134 while (1) {
135 mutex_lock(&stream->mutex);
136
137 /* There is available decomp_stream */
138 if (!list_empty(&stream->strm_list)) {
139 decomp_strm = list_entry(stream->strm_list.prev,
140 struct decomp_stream, list);
141 list_del(&decomp_strm->list);
142 mutex_unlock(&stream->mutex);
143 break;
144 }
145
146 /*
147 * If there is no available decomp and already full,
148 * let's wait for releasing decomp from other users.
149 */
150 if (stream->avail_decomp >= MAX_DECOMPRESSOR)
151 goto wait;
152
153 /* Let's allocate new decomp */
154 decomp_strm = kmalloc(sizeof(*decomp_strm), GFP_KERNEL);
155 if (!decomp_strm)
156 goto wait;
157
158 decomp_strm->stream = msblk->decompressor->init(msblk,
159 stream->comp_opts);
160 if (IS_ERR(decomp_strm->stream)) {
161 kfree(decomp_strm);
162 goto wait;
163 }
164
165 stream->avail_decomp++;
166 WARN_ON(stream->avail_decomp > MAX_DECOMPRESSOR);
167
168 mutex_unlock(&stream->mutex);
169 break;
170wait:
171 /*
172 * If system memory is tough, let's for other's
173 * releasing instead of hurting VM because it could
174 * make page cache thrashing.
175 */
176 mutex_unlock(&stream->mutex);
177 wait_event(stream->wait,
178 !list_empty(&stream->strm_list));
179 }
180
181 return decomp_strm;
182}
183
184
185int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh,
186 int b, int offset, int length, struct squashfs_page_actor *output)
187{
188 int res;
189 struct squashfs_stream *stream = msblk->stream;
190 struct decomp_stream *decomp_stream = get_decomp_stream(msblk, stream);
191 res = msblk->decompressor->decompress(msblk, decomp_stream->stream,
192 bh, b, offset, length, output);
193 put_decomp_stream(decomp_stream, stream);
194 if (res < 0)
195 ERROR("%s decompression failed, data probably corrupt\n",
196 msblk->decompressor->name);
197 return res;
198}
diff --git a/fs/squashfs/decompressor_multi_percpu.c b/fs/squashfs/decompressor_multi_percpu.c
new file mode 100644
index 000000000000..23a9c28ad8ea
--- /dev/null
+++ b/fs/squashfs/decompressor_multi_percpu.c
@@ -0,0 +1,97 @@
1/*
2 * Copyright (c) 2013
3 * Phillip Lougher <phillip@squashfs.org.uk>
4 *
5 * This work is licensed under the terms of the GNU GPL, version 2. See
6 * the COPYING file in the top-level directory.
7 */
8
9#include <linux/types.h>
10#include <linux/slab.h>
11#include <linux/percpu.h>
12#include <linux/buffer_head.h>
13
14#include "squashfs_fs.h"
15#include "squashfs_fs_sb.h"
16#include "decompressor.h"
17#include "squashfs.h"
18
19/*
20 * This file implements multi-threaded decompression using percpu
21 * variables, one thread per cpu core.
22 */
23
24struct squashfs_stream {
25 void *stream;
26};
27
28void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
29 void *comp_opts)
30{
31 struct squashfs_stream *stream;
32 struct squashfs_stream __percpu *percpu;
33 int err, cpu;
34
35 percpu = alloc_percpu(struct squashfs_stream);
36 if (percpu == NULL)
37 return ERR_PTR(-ENOMEM);
38
39 for_each_possible_cpu(cpu) {
40 stream = per_cpu_ptr(percpu, cpu);
41 stream->stream = msblk->decompressor->init(msblk, comp_opts);
42 if (IS_ERR(stream->stream)) {
43 err = PTR_ERR(stream->stream);
44 goto out;
45 }
46 }
47
48 kfree(comp_opts);
49 return (__force void *) percpu;
50
51out:
52 for_each_possible_cpu(cpu) {
53 stream = per_cpu_ptr(percpu, cpu);
54 if (!IS_ERR_OR_NULL(stream->stream))
55 msblk->decompressor->free(stream->stream);
56 }
57 free_percpu(percpu);
58 return ERR_PTR(err);
59}
60
61void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
62{
63 struct squashfs_stream __percpu *percpu =
64 (struct squashfs_stream __percpu *) msblk->stream;
65 struct squashfs_stream *stream;
66 int cpu;
67
68 if (msblk->stream) {
69 for_each_possible_cpu(cpu) {
70 stream = per_cpu_ptr(percpu, cpu);
71 msblk->decompressor->free(stream->stream);
72 }
73 free_percpu(percpu);
74 }
75}
76
77int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh,
78 int b, int offset, int length, struct squashfs_page_actor *output)
79{
80 struct squashfs_stream __percpu *percpu =
81 (struct squashfs_stream __percpu *) msblk->stream;
82 struct squashfs_stream *stream = get_cpu_ptr(percpu);
83 int res = msblk->decompressor->decompress(msblk, stream->stream, bh, b,
84 offset, length, output);
85 put_cpu_ptr(stream);
86
87 if (res < 0)
88 ERROR("%s decompression failed, data probably corrupt\n",
89 msblk->decompressor->name);
90
91 return res;
92}
93
94int squashfs_max_decompressors(void)
95{
96 return num_possible_cpus();
97}
diff --git a/fs/squashfs/decompressor_single.c b/fs/squashfs/decompressor_single.c
new file mode 100644
index 000000000000..a6c75929a00e
--- /dev/null
+++ b/fs/squashfs/decompressor_single.c
@@ -0,0 +1,85 @@
1/*
2 * Copyright (c) 2013
3 * Phillip Lougher <phillip@squashfs.org.uk>
4 *
5 * This work is licensed under the terms of the GNU GPL, version 2. See
6 * the COPYING file in the top-level directory.
7 */
8
9#include <linux/types.h>
10#include <linux/mutex.h>
11#include <linux/slab.h>
12#include <linux/buffer_head.h>
13
14#include "squashfs_fs.h"
15#include "squashfs_fs_sb.h"
16#include "decompressor.h"
17#include "squashfs.h"
18
19/*
20 * This file implements single-threaded decompression in the
21 * decompressor framework
22 */
23
24struct squashfs_stream {
25 void *stream;
26 struct mutex mutex;
27};
28
29void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
30 void *comp_opts)
31{
32 struct squashfs_stream *stream;
33 int err = -ENOMEM;
34
35 stream = kmalloc(sizeof(*stream), GFP_KERNEL);
36 if (stream == NULL)
37 goto out;
38
39 stream->stream = msblk->decompressor->init(msblk, comp_opts);
40 if (IS_ERR(stream->stream)) {
41 err = PTR_ERR(stream->stream);
42 goto out;
43 }
44
45 kfree(comp_opts);
46 mutex_init(&stream->mutex);
47 return stream;
48
49out:
50 kfree(stream);
51 return ERR_PTR(err);
52}
53
54void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
55{
56 struct squashfs_stream *stream = msblk->stream;
57
58 if (stream) {
59 msblk->decompressor->free(stream->stream);
60 kfree(stream);
61 }
62}
63
64int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh,
65 int b, int offset, int length, struct squashfs_page_actor *output)
66{
67 int res;
68 struct squashfs_stream *stream = msblk->stream;
69
70 mutex_lock(&stream->mutex);
71 res = msblk->decompressor->decompress(msblk, stream->stream, bh, b,
72 offset, length, output);
73 mutex_unlock(&stream->mutex);
74
75 if (res < 0)
76 ERROR("%s decompression failed, data probably corrupt\n",
77 msblk->decompressor->name);
78
79 return res;
80}
81
82int squashfs_max_decompressors(void)
83{
84 return 1;
85}
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 8ca62c28fe12..e5c9689062ba 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -370,77 +370,15 @@ static int read_blocklist(struct inode *inode, int index, u64 *block)
370 return le32_to_cpu(size); 370 return le32_to_cpu(size);
371} 371}
372 372
373 373/* Copy data into page cache */
374static int squashfs_readpage(struct file *file, struct page *page) 374void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
375 int bytes, int offset)
375{ 376{
376 struct inode *inode = page->mapping->host; 377 struct inode *inode = page->mapping->host;
377 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 378 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
378 int bytes, i, offset = 0, sparse = 0;
379 struct squashfs_cache_entry *buffer = NULL;
380 void *pageaddr; 379 void *pageaddr;
381 380 int i, mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
382 int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1; 381 int start_index = page->index & ~mask, end_index = start_index | mask;
383 int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT);
384 int start_index = page->index & ~mask;
385 int end_index = start_index | mask;
386 int file_end = i_size_read(inode) >> msblk->block_log;
387
388 TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
389 page->index, squashfs_i(inode)->start);
390
391 if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
392 PAGE_CACHE_SHIFT))
393 goto out;
394
395 if (index < file_end || squashfs_i(inode)->fragment_block ==
396 SQUASHFS_INVALID_BLK) {
397 /*
398 * Reading a datablock from disk. Need to read block list
399 * to get location and block size.
400 */
401 u64 block = 0;
402 int bsize = read_blocklist(inode, index, &block);
403 if (bsize < 0)
404 goto error_out;
405
406 if (bsize == 0) { /* hole */
407 bytes = index == file_end ?
408 (i_size_read(inode) & (msblk->block_size - 1)) :
409 msblk->block_size;
410 sparse = 1;
411 } else {
412 /*
413 * Read and decompress datablock.
414 */
415 buffer = squashfs_get_datablock(inode->i_sb,
416 block, bsize);
417 if (buffer->error) {
418 ERROR("Unable to read page, block %llx, size %x"
419 "\n", block, bsize);
420 squashfs_cache_put(buffer);
421 goto error_out;
422 }
423 bytes = buffer->length;
424 }
425 } else {
426 /*
427 * Datablock is stored inside a fragment (tail-end packed
428 * block).
429 */
430 buffer = squashfs_get_fragment(inode->i_sb,
431 squashfs_i(inode)->fragment_block,
432 squashfs_i(inode)->fragment_size);
433
434 if (buffer->error) {
435 ERROR("Unable to read page, block %llx, size %x\n",
436 squashfs_i(inode)->fragment_block,
437 squashfs_i(inode)->fragment_size);
438 squashfs_cache_put(buffer);
439 goto error_out;
440 }
441 bytes = i_size_read(inode) & (msblk->block_size - 1);
442 offset = squashfs_i(inode)->fragment_offset;
443 }
444 382
445 /* 383 /*
446 * Loop copying datablock into pages. As the datablock likely covers 384 * Loop copying datablock into pages. As the datablock likely covers
@@ -451,7 +389,7 @@ static int squashfs_readpage(struct file *file, struct page *page)
451 for (i = start_index; i <= end_index && bytes > 0; i++, 389 for (i = start_index; i <= end_index && bytes > 0; i++,
452 bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) { 390 bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) {
453 struct page *push_page; 391 struct page *push_page;
454 int avail = sparse ? 0 : min_t(int, bytes, PAGE_CACHE_SIZE); 392 int avail = buffer ? min_t(int, bytes, PAGE_CACHE_SIZE) : 0;
455 393
456 TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail); 394 TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);
457 395
@@ -475,11 +413,75 @@ skip_page:
475 if (i != page->index) 413 if (i != page->index)
476 page_cache_release(push_page); 414 page_cache_release(push_page);
477 } 415 }
416}
417
418/* Read datablock stored packed inside a fragment (tail-end packed block) */
419static int squashfs_readpage_fragment(struct page *page)
420{
421 struct inode *inode = page->mapping->host;
422 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
423 struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
424 squashfs_i(inode)->fragment_block,
425 squashfs_i(inode)->fragment_size);
426 int res = buffer->error;
427
428 if (res)
429 ERROR("Unable to read page, block %llx, size %x\n",
430 squashfs_i(inode)->fragment_block,
431 squashfs_i(inode)->fragment_size);
432 else
433 squashfs_copy_cache(page, buffer, i_size_read(inode) &
434 (msblk->block_size - 1),
435 squashfs_i(inode)->fragment_offset);
436
437 squashfs_cache_put(buffer);
438 return res;
439}
478 440
479 if (!sparse) 441static int squashfs_readpage_sparse(struct page *page, int index, int file_end)
480 squashfs_cache_put(buffer); 442{
443 struct inode *inode = page->mapping->host;
444 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
445 int bytes = index == file_end ?
446 (i_size_read(inode) & (msblk->block_size - 1)) :
447 msblk->block_size;
481 448
449 squashfs_copy_cache(page, NULL, bytes, 0);
482 return 0; 450 return 0;
451}
452
453static int squashfs_readpage(struct file *file, struct page *page)
454{
455 struct inode *inode = page->mapping->host;
456 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
457 int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT);
458 int file_end = i_size_read(inode) >> msblk->block_log;
459 int res;
460 void *pageaddr;
461
462 TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
463 page->index, squashfs_i(inode)->start);
464
465 if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
466 PAGE_CACHE_SHIFT))
467 goto out;
468
469 if (index < file_end || squashfs_i(inode)->fragment_block ==
470 SQUASHFS_INVALID_BLK) {
471 u64 block = 0;
472 int bsize = read_blocklist(inode, index, &block);
473 if (bsize < 0)
474 goto error_out;
475
476 if (bsize == 0)
477 res = squashfs_readpage_sparse(page, index, file_end);
478 else
479 res = squashfs_readpage_block(page, block, bsize);
480 } else
481 res = squashfs_readpage_fragment(page);
482
483 if (!res)
484 return 0;
483 485
484error_out: 486error_out:
485 SetPageError(page); 487 SetPageError(page);
diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c
new file mode 100644
index 000000000000..f2310d2a2019
--- /dev/null
+++ b/fs/squashfs/file_cache.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright (c) 2013
3 * Phillip Lougher <phillip@squashfs.org.uk>
4 *
5 * This work is licensed under the terms of the GNU GPL, version 2. See
6 * the COPYING file in the top-level directory.
7 */
8
9#include <linux/fs.h>
10#include <linux/vfs.h>
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/string.h>
14#include <linux/pagemap.h>
15#include <linux/mutex.h>
16
17#include "squashfs_fs.h"
18#include "squashfs_fs_sb.h"
19#include "squashfs_fs_i.h"
20#include "squashfs.h"
21
22/* Read separately compressed datablock and memcopy into page cache */
23int squashfs_readpage_block(struct page *page, u64 block, int bsize)
24{
25 struct inode *i = page->mapping->host;
26 struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
27 block, bsize);
28 int res = buffer->error;
29
30 if (res)
31 ERROR("Unable to read page, block %llx, size %x\n", block,
32 bsize);
33 else
34 squashfs_copy_cache(page, buffer, buffer->length, 0);
35
36 squashfs_cache_put(buffer);
37 return res;
38}
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
new file mode 100644
index 000000000000..62a0de6632e1
--- /dev/null
+++ b/fs/squashfs/file_direct.c
@@ -0,0 +1,176 @@
1/*
2 * Copyright (c) 2013
3 * Phillip Lougher <phillip@squashfs.org.uk>
4 *
5 * This work is licensed under the terms of the GNU GPL, version 2. See
6 * the COPYING file in the top-level directory.
7 */
8
9#include <linux/fs.h>
10#include <linux/vfs.h>
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/string.h>
14#include <linux/pagemap.h>
15#include <linux/mutex.h>
16
17#include "squashfs_fs.h"
18#include "squashfs_fs_sb.h"
19#include "squashfs_fs_i.h"
20#include "squashfs.h"
21#include "page_actor.h"
22
23static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
24 int pages, struct page **page);
25
26/* Read separately compressed datablock directly into page cache */
27int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
28
29{
30 struct inode *inode = target_page->mapping->host;
31 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
32
33 int file_end = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
34 int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
35 int start_index = target_page->index & ~mask;
36 int end_index = start_index | mask;
37 int i, n, pages, missing_pages, bytes, res = -ENOMEM;
38 struct page **page;
39 struct squashfs_page_actor *actor;
40 void *pageaddr;
41
42 if (end_index > file_end)
43 end_index = file_end;
44
45 pages = end_index - start_index + 1;
46
47 page = kmalloc(sizeof(void *) * pages, GFP_KERNEL);
48 if (page == NULL)
49 return res;
50
51 /*
52 * Create a "page actor" which will kmap and kunmap the
53 * page cache pages appropriately within the decompressor
54 */
55 actor = squashfs_page_actor_init_special(page, pages, 0);
56 if (actor == NULL)
57 goto out;
58
59 /* Try to grab all the pages covered by the Squashfs block */
60 for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) {
61 page[i] = (n == target_page->index) ? target_page :
62 grab_cache_page_nowait(target_page->mapping, n);
63
64 if (page[i] == NULL) {
65 missing_pages++;
66 continue;
67 }
68
69 if (PageUptodate(page[i])) {
70 unlock_page(page[i]);
71 page_cache_release(page[i]);
72 page[i] = NULL;
73 missing_pages++;
74 }
75 }
76
77 if (missing_pages) {
78 /*
79 * Couldn't get one or more pages, this page has either
80 * been VM reclaimed, but others are still in the page cache
81 * and uptodate, or we're racing with another thread in
82 * squashfs_readpage also trying to grab them. Fall back to
83 * using an intermediate buffer.
84 */
85 res = squashfs_read_cache(target_page, block, bsize, pages,
86 page);
87 if (res < 0)
88 goto mark_errored;
89
90 goto out;
91 }
92
93 /* Decompress directly into the page cache buffers */
94 res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
95 if (res < 0)
96 goto mark_errored;
97
98 /* Last page may have trailing bytes not filled */
99 bytes = res % PAGE_CACHE_SIZE;
100 if (bytes) {
101 pageaddr = kmap_atomic(page[pages - 1]);
102 memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
103 kunmap_atomic(pageaddr);
104 }
105
106 /* Mark pages as uptodate, unlock and release */
107 for (i = 0; i < pages; i++) {
108 flush_dcache_page(page[i]);
109 SetPageUptodate(page[i]);
110 unlock_page(page[i]);
111 if (page[i] != target_page)
112 page_cache_release(page[i]);
113 }
114
115 kfree(actor);
116 kfree(page);
117
118 return 0;
119
120mark_errored:
121 /* Decompression failed, mark pages as errored. Target_page is
122 * dealt with by the caller
123 */
124 for (i = 0; i < pages; i++) {
125 if (page[i] == NULL || page[i] == target_page)
126 continue;
127 flush_dcache_page(page[i]);
128 SetPageError(page[i]);
129 unlock_page(page[i]);
130 page_cache_release(page[i]);
131 }
132
133out:
134 kfree(actor);
135 kfree(page);
136 return res;
137}
138
139
140static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
141 int pages, struct page **page)
142{
143 struct inode *i = target_page->mapping->host;
144 struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
145 block, bsize);
146 int bytes = buffer->length, res = buffer->error, n, offset = 0;
147 void *pageaddr;
148
149 if (res) {
150 ERROR("Unable to read page, block %llx, size %x\n", block,
151 bsize);
152 goto out;
153 }
154
155 for (n = 0; n < pages && bytes > 0; n++,
156 bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) {
157 int avail = min_t(int, bytes, PAGE_CACHE_SIZE);
158
159 if (page[n] == NULL)
160 continue;
161
162 pageaddr = kmap_atomic(page[n]);
163 squashfs_copy_data(pageaddr, buffer, offset, avail);
164 memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
165 kunmap_atomic(pageaddr);
166 flush_dcache_page(page[n]);
167 SetPageUptodate(page[n]);
168 unlock_page(page[n]);
169 if (page[n] != target_page)
170 page_cache_release(page[n]);
171 }
172
173out:
174 squashfs_cache_put(buffer);
175 return res;
176}
diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c
index 00f4dfc5f088..244b9fbfff7b 100644
--- a/fs/squashfs/lzo_wrapper.c
+++ b/fs/squashfs/lzo_wrapper.c
@@ -31,13 +31,14 @@
31#include "squashfs_fs_sb.h" 31#include "squashfs_fs_sb.h"
32#include "squashfs.h" 32#include "squashfs.h"
33#include "decompressor.h" 33#include "decompressor.h"
34#include "page_actor.h"
34 35
35struct squashfs_lzo { 36struct squashfs_lzo {
36 void *input; 37 void *input;
37 void *output; 38 void *output;
38}; 39};
39 40
40static void *lzo_init(struct squashfs_sb_info *msblk, void *buff, int len) 41static void *lzo_init(struct squashfs_sb_info *msblk, void *buff)
41{ 42{
42 int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE); 43 int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
43 44
@@ -74,22 +75,16 @@ static void lzo_free(void *strm)
74} 75}
75 76
76 77
77static int lzo_uncompress(struct squashfs_sb_info *msblk, void **buffer, 78static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
78 struct buffer_head **bh, int b, int offset, int length, int srclength, 79 struct buffer_head **bh, int b, int offset, int length,
79 int pages) 80 struct squashfs_page_actor *output)
80{ 81{
81 struct squashfs_lzo *stream = msblk->stream; 82 struct squashfs_lzo *stream = strm;
82 void *buff = stream->input; 83 void *buff = stream->input, *data;
83 int avail, i, bytes = length, res; 84 int avail, i, bytes = length, res;
84 size_t out_len = srclength; 85 size_t out_len = output->length;
85
86 mutex_lock(&msblk->read_data_mutex);
87 86
88 for (i = 0; i < b; i++) { 87 for (i = 0; i < b; i++) {
89 wait_on_buffer(bh[i]);
90 if (!buffer_uptodate(bh[i]))
91 goto block_release;
92
93 avail = min(bytes, msblk->devblksize - offset); 88 avail = min(bytes, msblk->devblksize - offset);
94 memcpy(buff, bh[i]->b_data + offset, avail); 89 memcpy(buff, bh[i]->b_data + offset, avail);
95 buff += avail; 90 buff += avail;
@@ -104,24 +99,24 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void **buffer,
104 goto failed; 99 goto failed;
105 100
106 res = bytes = (int)out_len; 101 res = bytes = (int)out_len;
107 for (i = 0, buff = stream->output; bytes && i < pages; i++) { 102 data = squashfs_first_page(output);
108 avail = min_t(int, bytes, PAGE_CACHE_SIZE); 103 buff = stream->output;
109 memcpy(buffer[i], buff, avail); 104 while (data) {
110 buff += avail; 105 if (bytes <= PAGE_CACHE_SIZE) {
111 bytes -= avail; 106 memcpy(data, buff, bytes);
107 break;
108 } else {
109 memcpy(data, buff, PAGE_CACHE_SIZE);
110 buff += PAGE_CACHE_SIZE;
111 bytes -= PAGE_CACHE_SIZE;
112 data = squashfs_next_page(output);
113 }
112 } 114 }
115 squashfs_finish_page(output);
113 116
114 mutex_unlock(&msblk->read_data_mutex);
115 return res; 117 return res;
116 118
117block_release:
118 for (; i < b; i++)
119 put_bh(bh[i]);
120
121failed: 119failed:
122 mutex_unlock(&msblk->read_data_mutex);
123
124 ERROR("lzo decompression failed, data probably corrupt\n");
125 return -EIO; 120 return -EIO;
126} 121}
127 122
diff --git a/fs/squashfs/page_actor.c b/fs/squashfs/page_actor.c
new file mode 100644
index 000000000000..5a1c11f56441
--- /dev/null
+++ b/fs/squashfs/page_actor.c
@@ -0,0 +1,100 @@
1/*
2 * Copyright (c) 2013
3 * Phillip Lougher <phillip@squashfs.org.uk>
4 *
5 * This work is licensed under the terms of the GNU GPL, version 2. See
6 * the COPYING file in the top-level directory.
7 */
8
9#include <linux/kernel.h>
10#include <linux/slab.h>
11#include <linux/pagemap.h>
12#include "page_actor.h"
13
14/*
15 * This file contains implementations of page_actor for decompressing into
16 * an intermediate buffer, and for decompressing directly into the
17 * page cache.
18 *
19 * Calling code should avoid sleeping between calls to squashfs_first_page()
20 * and squashfs_finish_page().
21 */
22
23/* Implementation of page_actor for decompressing into intermediate buffer */
24static void *cache_first_page(struct squashfs_page_actor *actor)
25{
26 actor->next_page = 1;
27 return actor->buffer[0];
28}
29
30static void *cache_next_page(struct squashfs_page_actor *actor)
31{
32 if (actor->next_page == actor->pages)
33 return NULL;
34
35 return actor->buffer[actor->next_page++];
36}
37
38static void cache_finish_page(struct squashfs_page_actor *actor)
39{
40 /* empty */
41}
42
43struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
44 int pages, int length)
45{
46 struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
47
48 if (actor == NULL)
49 return NULL;
50
51 actor->length = length ? : pages * PAGE_CACHE_SIZE;
52 actor->buffer = buffer;
53 actor->pages = pages;
54 actor->next_page = 0;
55 actor->squashfs_first_page = cache_first_page;
56 actor->squashfs_next_page = cache_next_page;
57 actor->squashfs_finish_page = cache_finish_page;
58 return actor;
59}
60
61/* Implementation of page_actor for decompressing directly into page cache. */
62static void *direct_first_page(struct squashfs_page_actor *actor)
63{
64 actor->next_page = 1;
65 return actor->pageaddr = kmap_atomic(actor->page[0]);
66}
67
68static void *direct_next_page(struct squashfs_page_actor *actor)
69{
70 if (actor->pageaddr)
71 kunmap_atomic(actor->pageaddr);
72
73 return actor->pageaddr = actor->next_page == actor->pages ? NULL :
74 kmap_atomic(actor->page[actor->next_page++]);
75}
76
77static void direct_finish_page(struct squashfs_page_actor *actor)
78{
79 if (actor->pageaddr)
80 kunmap_atomic(actor->pageaddr);
81}
82
83struct squashfs_page_actor *squashfs_page_actor_init_special(struct page **page,
84 int pages, int length)
85{
86 struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
87
88 if (actor == NULL)
89 return NULL;
90
91 actor->length = length ? : pages * PAGE_CACHE_SIZE;
92 actor->page = page;
93 actor->pages = pages;
94 actor->next_page = 0;
95 actor->pageaddr = NULL;
96 actor->squashfs_first_page = direct_first_page;
97 actor->squashfs_next_page = direct_next_page;
98 actor->squashfs_finish_page = direct_finish_page;
99 return actor;
100}
diff --git a/fs/squashfs/page_actor.h b/fs/squashfs/page_actor.h
new file mode 100644
index 000000000000..26dd82008b82
--- /dev/null
+++ b/fs/squashfs/page_actor.h
@@ -0,0 +1,81 @@
1#ifndef PAGE_ACTOR_H
2#define PAGE_ACTOR_H
3/*
4 * Copyright (c) 2013
5 * Phillip Lougher <phillip@squashfs.org.uk>
6 *
7 * This work is licensed under the terms of the GNU GPL, version 2. See
8 * the COPYING file in the top-level directory.
9 */
10
11#ifndef CONFIG_SQUASHFS_FILE_DIRECT
12struct squashfs_page_actor {
13 void **page;
14 int pages;
15 int length;
16 int next_page;
17};
18
19static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page,
20 int pages, int length)
21{
22 struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
23
24 if (actor == NULL)
25 return NULL;
26
27 actor->length = length ? : pages * PAGE_CACHE_SIZE;
28 actor->page = page;
29 actor->pages = pages;
30 actor->next_page = 0;
31 return actor;
32}
33
34static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
35{
36 actor->next_page = 1;
37 return actor->page[0];
38}
39
40static inline void *squashfs_next_page(struct squashfs_page_actor *actor)
41{
42 return actor->next_page == actor->pages ? NULL :
43 actor->page[actor->next_page++];
44}
45
46static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
47{
48 /* empty */
49}
50#else
51struct squashfs_page_actor {
52 union {
53 void **buffer;
54 struct page **page;
55 };
56 void *pageaddr;
57 void *(*squashfs_first_page)(struct squashfs_page_actor *);
58 void *(*squashfs_next_page)(struct squashfs_page_actor *);
59 void (*squashfs_finish_page)(struct squashfs_page_actor *);
60 int pages;
61 int length;
62 int next_page;
63};
64
65extern struct squashfs_page_actor *squashfs_page_actor_init(void **, int, int);
66extern struct squashfs_page_actor *squashfs_page_actor_init_special(struct page
67 **, int, int);
68static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
69{
70 return actor->squashfs_first_page(actor);
71}
72static inline void *squashfs_next_page(struct squashfs_page_actor *actor)
73{
74 return actor->squashfs_next_page(actor);
75}
76static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
77{
78 actor->squashfs_finish_page(actor);
79}
80#endif
81#endif
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index d1266516ed08..9e1bb79f7e6f 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -28,8 +28,8 @@
28#define WARNING(s, args...) pr_warning("SQUASHFS: "s, ## args) 28#define WARNING(s, args...) pr_warning("SQUASHFS: "s, ## args)
29 29
30/* block.c */ 30/* block.c */
31extern int squashfs_read_data(struct super_block *, void **, u64, int, u64 *, 31extern int squashfs_read_data(struct super_block *, u64, int, u64 *,
32 int, int); 32 struct squashfs_page_actor *);
33 33
34/* cache.c */ 34/* cache.c */
35extern struct squashfs_cache *squashfs_cache_init(char *, int, int); 35extern struct squashfs_cache *squashfs_cache_init(char *, int, int);
@@ -48,7 +48,14 @@ extern void *squashfs_read_table(struct super_block *, u64, int);
48 48
49/* decompressor.c */ 49/* decompressor.c */
50extern const struct squashfs_decompressor *squashfs_lookup_decompressor(int); 50extern const struct squashfs_decompressor *squashfs_lookup_decompressor(int);
51extern void *squashfs_decompressor_init(struct super_block *, unsigned short); 51extern void *squashfs_decompressor_setup(struct super_block *, unsigned short);
52
53/* decompressor_xxx.c */
54extern void *squashfs_decompressor_create(struct squashfs_sb_info *, void *);
55extern void squashfs_decompressor_destroy(struct squashfs_sb_info *);
56extern int squashfs_decompress(struct squashfs_sb_info *, struct buffer_head **,
57 int, int, int, struct squashfs_page_actor *);
58extern int squashfs_max_decompressors(void);
52 59
53/* export.c */ 60/* export.c */
54extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64, u64, 61extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64, u64,
@@ -59,6 +66,13 @@ extern int squashfs_frag_lookup(struct super_block *, unsigned int, u64 *);
59extern __le64 *squashfs_read_fragment_index_table(struct super_block *, 66extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
60 u64, u64, unsigned int); 67 u64, u64, unsigned int);
61 68
69/* file.c */
70void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int,
71 int);
72
73/* file_xxx.c */
74extern int squashfs_readpage_block(struct page *, u64, int);
75
62/* id.c */ 76/* id.c */
63extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *); 77extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
64extern __le64 *squashfs_read_id_index_table(struct super_block *, u64, u64, 78extern __le64 *squashfs_read_id_index_table(struct super_block *, u64, u64,
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
index 52934a22f296..1da565cb50c3 100644
--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -50,6 +50,7 @@ struct squashfs_cache_entry {
50 wait_queue_head_t wait_queue; 50 wait_queue_head_t wait_queue;
51 struct squashfs_cache *cache; 51 struct squashfs_cache *cache;
52 void **data; 52 void **data;
53 struct squashfs_page_actor *actor;
53}; 54};
54 55
55struct squashfs_sb_info { 56struct squashfs_sb_info {
@@ -63,10 +64,9 @@ struct squashfs_sb_info {
63 __le64 *id_table; 64 __le64 *id_table;
64 __le64 *fragment_index; 65 __le64 *fragment_index;
65 __le64 *xattr_id_table; 66 __le64 *xattr_id_table;
66 struct mutex read_data_mutex;
67 struct mutex meta_index_mutex; 67 struct mutex meta_index_mutex;
68 struct meta_index *meta_index; 68 struct meta_index *meta_index;
69 void *stream; 69 struct squashfs_stream *stream;
70 __le64 *inode_lookup_table; 70 __le64 *inode_lookup_table;
71 u64 inode_table; 71 u64 inode_table;
72 u64 directory_table; 72 u64 directory_table;
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 60553a9053ca..202df6312d4e 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -98,7 +98,6 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
98 msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE); 98 msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
99 msblk->devblksize_log2 = ffz(~msblk->devblksize); 99 msblk->devblksize_log2 = ffz(~msblk->devblksize);
100 100
101 mutex_init(&msblk->read_data_mutex);
102 mutex_init(&msblk->meta_index_mutex); 101 mutex_init(&msblk->meta_index_mutex);
103 102
104 /* 103 /*
@@ -206,13 +205,14 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
206 goto failed_mount; 205 goto failed_mount;
207 206
208 /* Allocate read_page block */ 207 /* Allocate read_page block */
209 msblk->read_page = squashfs_cache_init("data", 1, msblk->block_size); 208 msblk->read_page = squashfs_cache_init("data",
209 squashfs_max_decompressors(), msblk->block_size);
210 if (msblk->read_page == NULL) { 210 if (msblk->read_page == NULL) {
211 ERROR("Failed to allocate read_page block\n"); 211 ERROR("Failed to allocate read_page block\n");
212 goto failed_mount; 212 goto failed_mount;
213 } 213 }
214 214
215 msblk->stream = squashfs_decompressor_init(sb, flags); 215 msblk->stream = squashfs_decompressor_setup(sb, flags);
216 if (IS_ERR(msblk->stream)) { 216 if (IS_ERR(msblk->stream)) {
217 err = PTR_ERR(msblk->stream); 217 err = PTR_ERR(msblk->stream);
218 msblk->stream = NULL; 218 msblk->stream = NULL;
@@ -336,7 +336,7 @@ failed_mount:
336 squashfs_cache_delete(msblk->block_cache); 336 squashfs_cache_delete(msblk->block_cache);
337 squashfs_cache_delete(msblk->fragment_cache); 337 squashfs_cache_delete(msblk->fragment_cache);
338 squashfs_cache_delete(msblk->read_page); 338 squashfs_cache_delete(msblk->read_page);
339 squashfs_decompressor_free(msblk, msblk->stream); 339 squashfs_decompressor_destroy(msblk);
340 kfree(msblk->inode_lookup_table); 340 kfree(msblk->inode_lookup_table);
341 kfree(msblk->fragment_index); 341 kfree(msblk->fragment_index);
342 kfree(msblk->id_table); 342 kfree(msblk->id_table);
@@ -383,7 +383,7 @@ static void squashfs_put_super(struct super_block *sb)
383 squashfs_cache_delete(sbi->block_cache); 383 squashfs_cache_delete(sbi->block_cache);
384 squashfs_cache_delete(sbi->fragment_cache); 384 squashfs_cache_delete(sbi->fragment_cache);
385 squashfs_cache_delete(sbi->read_page); 385 squashfs_cache_delete(sbi->read_page);
386 squashfs_decompressor_free(sbi, sbi->stream); 386 squashfs_decompressor_destroy(sbi);
387 kfree(sbi->id_table); 387 kfree(sbi->id_table);
388 kfree(sbi->fragment_index); 388 kfree(sbi->fragment_index);
389 kfree(sbi->meta_index); 389 kfree(sbi->meta_index);
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
index 1760b7d108f6..c609624e4b8a 100644
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -32,44 +32,70 @@
32#include "squashfs_fs_sb.h" 32#include "squashfs_fs_sb.h"
33#include "squashfs.h" 33#include "squashfs.h"
34#include "decompressor.h" 34#include "decompressor.h"
35#include "page_actor.h"
35 36
36struct squashfs_xz { 37struct squashfs_xz {
37 struct xz_dec *state; 38 struct xz_dec *state;
38 struct xz_buf buf; 39 struct xz_buf buf;
39}; 40};
40 41
41struct comp_opts { 42struct disk_comp_opts {
42 __le32 dictionary_size; 43 __le32 dictionary_size;
43 __le32 flags; 44 __le32 flags;
44}; 45};
45 46
46static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff, 47struct comp_opts {
47 int len) 48 int dict_size;
49};
50
51static void *squashfs_xz_comp_opts(struct squashfs_sb_info *msblk,
52 void *buff, int len)
48{ 53{
49 struct comp_opts *comp_opts = buff; 54 struct disk_comp_opts *comp_opts = buff;
50 struct squashfs_xz *stream; 55 struct comp_opts *opts;
51 int dict_size = msblk->block_size; 56 int err = 0, n;
52 int err, n; 57
58 opts = kmalloc(sizeof(*opts), GFP_KERNEL);
59 if (opts == NULL) {
60 err = -ENOMEM;
61 goto out2;
62 }
53 63
54 if (comp_opts) { 64 if (comp_opts) {
55 /* check compressor options are the expected length */ 65 /* check compressor options are the expected length */
56 if (len < sizeof(*comp_opts)) { 66 if (len < sizeof(*comp_opts)) {
57 err = -EIO; 67 err = -EIO;
58 goto failed; 68 goto out;
59 } 69 }
60 70
61 dict_size = le32_to_cpu(comp_opts->dictionary_size); 71 opts->dict_size = le32_to_cpu(comp_opts->dictionary_size);
62 72
63 /* the dictionary size should be 2^n or 2^n+2^(n+1) */ 73 /* the dictionary size should be 2^n or 2^n+2^(n+1) */
64 n = ffs(dict_size) - 1; 74 n = ffs(opts->dict_size) - 1;
65 if (dict_size != (1 << n) && dict_size != (1 << n) + 75 if (opts->dict_size != (1 << n) && opts->dict_size != (1 << n) +
66 (1 << (n + 1))) { 76 (1 << (n + 1))) {
67 err = -EIO; 77 err = -EIO;
68 goto failed; 78 goto out;
69 } 79 }
70 } 80 } else
81 /* use defaults */
82 opts->dict_size = max_t(int, msblk->block_size,
83 SQUASHFS_METADATA_SIZE);
84
85 return opts;
86
87out:
88 kfree(opts);
89out2:
90 return ERR_PTR(err);
91}
92
71 93
72 dict_size = max_t(int, dict_size, SQUASHFS_METADATA_SIZE); 94static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff)
95{
96 struct comp_opts *comp_opts = buff;
97 struct squashfs_xz *stream;
98 int err;
73 99
74 stream = kmalloc(sizeof(*stream), GFP_KERNEL); 100 stream = kmalloc(sizeof(*stream), GFP_KERNEL);
75 if (stream == NULL) { 101 if (stream == NULL) {
@@ -77,7 +103,7 @@ static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff,
77 goto failed; 103 goto failed;
78 } 104 }
79 105
80 stream->state = xz_dec_init(XZ_PREALLOC, dict_size); 106 stream->state = xz_dec_init(XZ_PREALLOC, comp_opts->dict_size);
81 if (stream->state == NULL) { 107 if (stream->state == NULL) {
82 kfree(stream); 108 kfree(stream);
83 err = -ENOMEM; 109 err = -ENOMEM;
@@ -103,42 +129,37 @@ static void squashfs_xz_free(void *strm)
103} 129}
104 130
105 131
106static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void **buffer, 132static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
107 struct buffer_head **bh, int b, int offset, int length, int srclength, 133 struct buffer_head **bh, int b, int offset, int length,
108 int pages) 134 struct squashfs_page_actor *output)
109{ 135{
110 enum xz_ret xz_err; 136 enum xz_ret xz_err;
111 int avail, total = 0, k = 0, page = 0; 137 int avail, total = 0, k = 0;
112 struct squashfs_xz *stream = msblk->stream; 138 struct squashfs_xz *stream = strm;
113
114 mutex_lock(&msblk->read_data_mutex);
115 139
116 xz_dec_reset(stream->state); 140 xz_dec_reset(stream->state);
117 stream->buf.in_pos = 0; 141 stream->buf.in_pos = 0;
118 stream->buf.in_size = 0; 142 stream->buf.in_size = 0;
119 stream->buf.out_pos = 0; 143 stream->buf.out_pos = 0;
120 stream->buf.out_size = PAGE_CACHE_SIZE; 144 stream->buf.out_size = PAGE_CACHE_SIZE;
121 stream->buf.out = buffer[page++]; 145 stream->buf.out = squashfs_first_page(output);
122 146
123 do { 147 do {
124 if (stream->buf.in_pos == stream->buf.in_size && k < b) { 148 if (stream->buf.in_pos == stream->buf.in_size && k < b) {
125 avail = min(length, msblk->devblksize - offset); 149 avail = min(length, msblk->devblksize - offset);
126 length -= avail; 150 length -= avail;
127 wait_on_buffer(bh[k]);
128 if (!buffer_uptodate(bh[k]))
129 goto release_mutex;
130
131 stream->buf.in = bh[k]->b_data + offset; 151 stream->buf.in = bh[k]->b_data + offset;
132 stream->buf.in_size = avail; 152 stream->buf.in_size = avail;
133 stream->buf.in_pos = 0; 153 stream->buf.in_pos = 0;
134 offset = 0; 154 offset = 0;
135 } 155 }
136 156
137 if (stream->buf.out_pos == stream->buf.out_size 157 if (stream->buf.out_pos == stream->buf.out_size) {
138 && page < pages) { 158 stream->buf.out = squashfs_next_page(output);
139 stream->buf.out = buffer[page++]; 159 if (stream->buf.out != NULL) {
140 stream->buf.out_pos = 0; 160 stream->buf.out_pos = 0;
141 total += PAGE_CACHE_SIZE; 161 total += PAGE_CACHE_SIZE;
162 }
142 } 163 }
143 164
144 xz_err = xz_dec_run(stream->state, &stream->buf); 165 xz_err = xz_dec_run(stream->state, &stream->buf);
@@ -147,23 +168,14 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void **buffer,
147 put_bh(bh[k++]); 168 put_bh(bh[k++]);
148 } while (xz_err == XZ_OK); 169 } while (xz_err == XZ_OK);
149 170
150 if (xz_err != XZ_STREAM_END) { 171 squashfs_finish_page(output);
151 ERROR("xz_dec_run error, data probably corrupt\n");
152 goto release_mutex;
153 }
154
155 if (k < b) {
156 ERROR("xz_uncompress error, input remaining\n");
157 goto release_mutex;
158 }
159 172
160 total += stream->buf.out_pos; 173 if (xz_err != XZ_STREAM_END || k < b)
161 mutex_unlock(&msblk->read_data_mutex); 174 goto out;
162 return total;
163 175
164release_mutex: 176 return total + stream->buf.out_pos;
165 mutex_unlock(&msblk->read_data_mutex);
166 177
178out:
167 for (; k < b; k++) 179 for (; k < b; k++)
168 put_bh(bh[k]); 180 put_bh(bh[k]);
169 181
@@ -172,6 +184,7 @@ release_mutex:
172 184
173const struct squashfs_decompressor squashfs_xz_comp_ops = { 185const struct squashfs_decompressor squashfs_xz_comp_ops = {
174 .init = squashfs_xz_init, 186 .init = squashfs_xz_init,
187 .comp_opts = squashfs_xz_comp_opts,
175 .free = squashfs_xz_free, 188 .free = squashfs_xz_free,
176 .decompress = squashfs_xz_uncompress, 189 .decompress = squashfs_xz_uncompress,
177 .id = XZ_COMPRESSION, 190 .id = XZ_COMPRESSION,
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index 55d918fd2d86..8727caba6882 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -32,8 +32,9 @@
32#include "squashfs_fs_sb.h" 32#include "squashfs_fs_sb.h"
33#include "squashfs.h" 33#include "squashfs.h"
34#include "decompressor.h" 34#include "decompressor.h"
35#include "page_actor.h"
35 36
36static void *zlib_init(struct squashfs_sb_info *dummy, void *buff, int len) 37static void *zlib_init(struct squashfs_sb_info *dummy, void *buff)
37{ 38{
38 z_stream *stream = kmalloc(sizeof(z_stream), GFP_KERNEL); 39 z_stream *stream = kmalloc(sizeof(z_stream), GFP_KERNEL);
39 if (stream == NULL) 40 if (stream == NULL)
@@ -61,44 +62,37 @@ static void zlib_free(void *strm)
61} 62}
62 63
63 64
64static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer, 65static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
65 struct buffer_head **bh, int b, int offset, int length, int srclength, 66 struct buffer_head **bh, int b, int offset, int length,
66 int pages) 67 struct squashfs_page_actor *output)
67{ 68{
68 int zlib_err, zlib_init = 0; 69 int zlib_err, zlib_init = 0, k = 0;
69 int k = 0, page = 0; 70 z_stream *stream = strm;
70 z_stream *stream = msblk->stream;
71
72 mutex_lock(&msblk->read_data_mutex);
73 71
74 stream->avail_out = 0; 72 stream->avail_out = PAGE_CACHE_SIZE;
73 stream->next_out = squashfs_first_page(output);
75 stream->avail_in = 0; 74 stream->avail_in = 0;
76 75
77 do { 76 do {
78 if (stream->avail_in == 0 && k < b) { 77 if (stream->avail_in == 0 && k < b) {
79 int avail = min(length, msblk->devblksize - offset); 78 int avail = min(length, msblk->devblksize - offset);
80 length -= avail; 79 length -= avail;
81 wait_on_buffer(bh[k]);
82 if (!buffer_uptodate(bh[k]))
83 goto release_mutex;
84
85 stream->next_in = bh[k]->b_data + offset; 80 stream->next_in = bh[k]->b_data + offset;
86 stream->avail_in = avail; 81 stream->avail_in = avail;
87 offset = 0; 82 offset = 0;
88 } 83 }
89 84
90 if (stream->avail_out == 0 && page < pages) { 85 if (stream->avail_out == 0) {
91 stream->next_out = buffer[page++]; 86 stream->next_out = squashfs_next_page(output);
92 stream->avail_out = PAGE_CACHE_SIZE; 87 if (stream->next_out != NULL)
88 stream->avail_out = PAGE_CACHE_SIZE;
93 } 89 }
94 90
95 if (!zlib_init) { 91 if (!zlib_init) {
96 zlib_err = zlib_inflateInit(stream); 92 zlib_err = zlib_inflateInit(stream);
97 if (zlib_err != Z_OK) { 93 if (zlib_err != Z_OK) {
98 ERROR("zlib_inflateInit returned unexpected " 94 squashfs_finish_page(output);
99 "result 0x%x, srclength %d\n", 95 goto out;
100 zlib_err, srclength);
101 goto release_mutex;
102 } 96 }
103 zlib_init = 1; 97 zlib_init = 1;
104 } 98 }
@@ -109,29 +103,21 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer,
109 put_bh(bh[k++]); 103 put_bh(bh[k++]);
110 } while (zlib_err == Z_OK); 104 } while (zlib_err == Z_OK);
111 105
112 if (zlib_err != Z_STREAM_END) { 106 squashfs_finish_page(output);
113 ERROR("zlib_inflate error, data probably corrupt\n");
114 goto release_mutex;
115 }
116 107
117 zlib_err = zlib_inflateEnd(stream); 108 if (zlib_err != Z_STREAM_END)
118 if (zlib_err != Z_OK) { 109 goto out;
119 ERROR("zlib_inflate error, data probably corrupt\n");
120 goto release_mutex;
121 }
122 110
123 if (k < b) { 111 zlib_err = zlib_inflateEnd(stream);
124 ERROR("zlib_uncompress error, data remaining\n"); 112 if (zlib_err != Z_OK)
125 goto release_mutex; 113 goto out;
126 }
127 114
128 length = stream->total_out; 115 if (k < b)
129 mutex_unlock(&msblk->read_data_mutex); 116 goto out;
130 return length;
131 117
132release_mutex: 118 return stream->total_out;
133 mutex_unlock(&msblk->read_data_mutex);
134 119
120out:
135 for (; k < b; k++) 121 for (; k < b; k++)
136 put_bh(bh[k]); 122 put_bh(bh[k]);
137 123
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 79b5da2acbe1..b94f93685093 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -609,7 +609,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
609 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata; 609 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
610 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj; 610 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
611 struct sysfs_open_file *of; 611 struct sysfs_open_file *of;
612 bool has_read, has_write; 612 bool has_read, has_write, has_mmap;
613 int error = -EACCES; 613 int error = -EACCES;
614 614
615 /* need attr_sd for attr and ops, its parent for kobj */ 615 /* need attr_sd for attr and ops, its parent for kobj */
@@ -621,6 +621,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
621 621
622 has_read = battr->read || battr->mmap; 622 has_read = battr->read || battr->mmap;
623 has_write = battr->write || battr->mmap; 623 has_write = battr->write || battr->mmap;
624 has_mmap = battr->mmap;
624 } else { 625 } else {
625 const struct sysfs_ops *ops = sysfs_file_ops(attr_sd); 626 const struct sysfs_ops *ops = sysfs_file_ops(attr_sd);
626 627
@@ -632,6 +633,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
632 633
633 has_read = ops->show; 634 has_read = ops->show;
634 has_write = ops->store; 635 has_write = ops->store;
636 has_mmap = false;
635 } 637 }
636 638
637 /* check perms and supported operations */ 639 /* check perms and supported operations */
@@ -649,7 +651,23 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
649 if (!of) 651 if (!of)
650 goto err_out; 652 goto err_out;
651 653
652 mutex_init(&of->mutex); 654 /*
655 * The following is done to give a different lockdep key to
656 * @of->mutex for files which implement mmap. This is a rather
657 * crude way to avoid false positive lockdep warning around
658 * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
659 * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
660 * which mm->mmap_sem nests, while holding @of->mutex. As each
661 * open file has a separate mutex, it's okay as long as those don't
662 * happen on the same file. At this point, we can't easily give
663 * each file a separate locking class. Let's differentiate on
664 * whether the file has mmap or not for now.
665 */
666 if (has_mmap)
667 mutex_init(&of->mutex);
668 else
669 mutex_init(&of->mutex);
670
653 of->sd = attr_sd; 671 of->sd = attr_sd;
654 of->file = file; 672 of->file = file;
655 673
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 1c02da8bb7df..3ef11b22e750 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -1137,6 +1137,7 @@ xfs_bmap_add_attrfork(
1137 int committed; /* xaction was committed */ 1137 int committed; /* xaction was committed */
1138 int logflags; /* logging flags */ 1138 int logflags; /* logging flags */
1139 int error; /* error return value */ 1139 int error; /* error return value */
1140 int cancel_flags = 0;
1140 1141
1141 ASSERT(XFS_IFORK_Q(ip) == 0); 1142 ASSERT(XFS_IFORK_Q(ip) == 0);
1142 1143
@@ -1147,19 +1148,20 @@ xfs_bmap_add_attrfork(
1147 if (rsvd) 1148 if (rsvd)
1148 tp->t_flags |= XFS_TRANS_RESERVE; 1149 tp->t_flags |= XFS_TRANS_RESERVE;
1149 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_addafork, blks, 0); 1150 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_addafork, blks, 0);
1150 if (error) 1151 if (error) {
1151 goto error0; 1152 xfs_trans_cancel(tp, 0);
1153 return error;
1154 }
1155 cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
1152 xfs_ilock(ip, XFS_ILOCK_EXCL); 1156 xfs_ilock(ip, XFS_ILOCK_EXCL);
1153 error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ? 1157 error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
1154 XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : 1158 XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
1155 XFS_QMOPT_RES_REGBLKS); 1159 XFS_QMOPT_RES_REGBLKS);
1156 if (error) { 1160 if (error)
1157 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1161 goto trans_cancel;
1158 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES); 1162 cancel_flags |= XFS_TRANS_ABORT;
1159 return error;
1160 }
1161 if (XFS_IFORK_Q(ip)) 1163 if (XFS_IFORK_Q(ip))
1162 goto error1; 1164 goto trans_cancel;
1163 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) { 1165 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
1164 /* 1166 /*
1165 * For inodes coming from pre-6.2 filesystems. 1167 * For inodes coming from pre-6.2 filesystems.
@@ -1169,7 +1171,7 @@ xfs_bmap_add_attrfork(
1169 } 1171 }
1170 ASSERT(ip->i_d.di_anextents == 0); 1172 ASSERT(ip->i_d.di_anextents == 0);
1171 1173
1172 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 1174 xfs_trans_ijoin(tp, ip, 0);
1173 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1175 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1174 1176
1175 switch (ip->i_d.di_format) { 1177 switch (ip->i_d.di_format) {
@@ -1191,7 +1193,7 @@ xfs_bmap_add_attrfork(
1191 default: 1193 default:
1192 ASSERT(0); 1194 ASSERT(0);
1193 error = XFS_ERROR(EINVAL); 1195 error = XFS_ERROR(EINVAL);
1194 goto error1; 1196 goto trans_cancel;
1195 } 1197 }
1196 1198
1197 ASSERT(ip->i_afp == NULL); 1199 ASSERT(ip->i_afp == NULL);
@@ -1219,7 +1221,7 @@ xfs_bmap_add_attrfork(
1219 if (logflags) 1221 if (logflags)
1220 xfs_trans_log_inode(tp, ip, logflags); 1222 xfs_trans_log_inode(tp, ip, logflags);
1221 if (error) 1223 if (error)
1222 goto error2; 1224 goto bmap_cancel;
1223 if (!xfs_sb_version_hasattr(&mp->m_sb) || 1225 if (!xfs_sb_version_hasattr(&mp->m_sb) ||
1224 (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) { 1226 (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
1225 __int64_t sbfields = 0; 1227 __int64_t sbfields = 0;
@@ -1242,14 +1244,16 @@ xfs_bmap_add_attrfork(
1242 1244
1243 error = xfs_bmap_finish(&tp, &flist, &committed); 1245 error = xfs_bmap_finish(&tp, &flist, &committed);
1244 if (error) 1246 if (error)
1245 goto error2; 1247 goto bmap_cancel;
1246 return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); 1248 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1247error2: 1249 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1250 return error;
1251
1252bmap_cancel:
1248 xfs_bmap_cancel(&flist); 1253 xfs_bmap_cancel(&flist);
1249error1: 1254trans_cancel:
1255 xfs_trans_cancel(tp, cancel_flags);
1250 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1256 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1251error0:
1252 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
1253 return error; 1257 return error;
1254} 1258}
1255 1259
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 8367d6dc18c9..4f11ef011139 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -157,7 +157,7 @@ xfs_ioc_trim(
157 struct xfs_mount *mp, 157 struct xfs_mount *mp,
158 struct fstrim_range __user *urange) 158 struct fstrim_range __user *urange)
159{ 159{
160 struct request_queue *q = mp->m_ddev_targp->bt_bdev->bd_disk->queue; 160 struct request_queue *q = bdev_get_queue(mp->m_ddev_targp->bt_bdev);
161 unsigned int granularity = q->limits.discard_granularity; 161 unsigned int granularity = q->limits.discard_granularity;
162 struct fstrim_range range; 162 struct fstrim_range range;
163 xfs_daddr_t start, end, minlen; 163 xfs_daddr_t start, end, minlen;
@@ -180,7 +180,8 @@ xfs_ioc_trim(
180 * matter as trimming blocks is an advisory interface. 180 * matter as trimming blocks is an advisory interface.
181 */ 181 */
182 if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) || 182 if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
183 range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp))) 183 range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)) ||
184 range.len < mp->m_sb.sb_blocksize)
184 return -XFS_ERROR(EINVAL); 185 return -XFS_ERROR(EINVAL);
185 186
186 start = BTOBB(range.start); 187 start = BTOBB(range.start);
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index a6e54b3319bd..02fb943cbf22 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -220,6 +220,8 @@ xfs_growfs_data_private(
220 */ 220 */
221 nfree = 0; 221 nfree = 0;
222 for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) { 222 for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
223 __be32 *agfl_bno;
224
223 /* 225 /*
224 * AG freespace header block 226 * AG freespace header block
225 */ 227 */
@@ -279,8 +281,10 @@ xfs_growfs_data_private(
279 agfl->agfl_seqno = cpu_to_be32(agno); 281 agfl->agfl_seqno = cpu_to_be32(agno);
280 uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid); 282 uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid);
281 } 283 }
284
285 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
282 for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++) 286 for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
283 agfl->agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK); 287 agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
284 288
285 error = xfs_bwrite(bp); 289 error = xfs_bwrite(bp);
286 xfs_buf_relse(bp); 290 xfs_buf_relse(bp);
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 4d613401a5e0..33ad9a77791f 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -442,7 +442,8 @@ xfs_attrlist_by_handle(
442 return -XFS_ERROR(EPERM); 442 return -XFS_ERROR(EPERM);
443 if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t))) 443 if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
444 return -XFS_ERROR(EFAULT); 444 return -XFS_ERROR(EFAULT);
445 if (al_hreq.buflen > XATTR_LIST_MAX) 445 if (al_hreq.buflen < sizeof(struct attrlist) ||
446 al_hreq.buflen > XATTR_LIST_MAX)
446 return -XFS_ERROR(EINVAL); 447 return -XFS_ERROR(EINVAL);
447 448
448 /* 449 /*
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index e8fb1231db81..a7992f8de9d3 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -356,7 +356,8 @@ xfs_compat_attrlist_by_handle(
356 if (copy_from_user(&al_hreq, arg, 356 if (copy_from_user(&al_hreq, arg,
357 sizeof(compat_xfs_fsop_attrlist_handlereq_t))) 357 sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
358 return -XFS_ERROR(EFAULT); 358 return -XFS_ERROR(EFAULT);
359 if (al_hreq.buflen > XATTR_LIST_MAX) 359 if (al_hreq.buflen < sizeof(struct attrlist) ||
360 al_hreq.buflen > XATTR_LIST_MAX)
360 return -XFS_ERROR(EINVAL); 361 return -XFS_ERROR(EINVAL);
361 362
362 /* 363 /*
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index da88f167af78..02df7b408a26 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -41,6 +41,7 @@
41#include "xfs_fsops.h" 41#include "xfs_fsops.h"
42#include "xfs_trace.h" 42#include "xfs_trace.h"
43#include "xfs_icache.h" 43#include "xfs_icache.h"
44#include "xfs_dinode.h"
44 45
45 46
46#ifdef HAVE_PERCPU_SB 47#ifdef HAVE_PERCPU_SB
@@ -718,8 +719,22 @@ xfs_mountfs(
718 * Set the inode cluster size. 719 * Set the inode cluster size.
719 * This may still be overridden by the file system 720 * This may still be overridden by the file system
720 * block size if it is larger than the chosen cluster size. 721 * block size if it is larger than the chosen cluster size.
722 *
723 * For v5 filesystems, scale the cluster size with the inode size to
724 * keep a constant ratio of inode per cluster buffer, but only if mkfs
725 * has set the inode alignment value appropriately for larger cluster
726 * sizes.
721 */ 727 */
722 mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE; 728 mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
729 if (xfs_sb_version_hascrc(&mp->m_sb)) {
730 int new_size = mp->m_inode_cluster_size;
731
732 new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
733 if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
734 mp->m_inode_cluster_size = new_size;
735 xfs_info(mp, "Using inode cluster size of %d bytes",
736 mp->m_inode_cluster_size);
737 }
723 738
724 /* 739 /*
725 * Set inode alignment fields 740 * Set inode alignment fields
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 1d8101a10d8e..a466c5e5826e 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -112,7 +112,7 @@ typedef struct xfs_mount {
112 __uint8_t m_blkbb_log; /* blocklog - BBSHIFT */ 112 __uint8_t m_blkbb_log; /* blocklog - BBSHIFT */
113 __uint8_t m_agno_log; /* log #ag's */ 113 __uint8_t m_agno_log; /* log #ag's */
114 __uint8_t m_agino_log; /* #bits for agino in inum */ 114 __uint8_t m_agino_log; /* #bits for agino in inum */
115 __uint16_t m_inode_cluster_size;/* min inode buf size */ 115 uint m_inode_cluster_size;/* min inode buf size */
116 uint m_blockmask; /* sb_blocksize-1 */ 116 uint m_blockmask; /* sb_blocksize-1 */
117 uint m_blockwsize; /* sb_blocksize in words */ 117 uint m_blockwsize; /* sb_blocksize in words */
118 uint m_blockwmask; /* blockwsize-1 */ 118 uint m_blockwmask; /* blockwsize-1 */
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index 1bba7f60d94c..50c3f5614288 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -111,12 +111,14 @@ xfs_trans_log_inode(
111 111
112 /* 112 /*
113 * First time we log the inode in a transaction, bump the inode change 113 * First time we log the inode in a transaction, bump the inode change
114 * counter if it is configured for this to occur. 114 * counter if it is configured for this to occur. We don't use
115 * inode_inc_version() because there is no need for extra locking around
116 * i_version as we already hold the inode locked exclusively for
117 * metadata modification.
115 */ 118 */
116 if (!(ip->i_itemp->ili_item.li_desc->lid_flags & XFS_LID_DIRTY) && 119 if (!(ip->i_itemp->ili_item.li_desc->lid_flags & XFS_LID_DIRTY) &&
117 IS_I_VERSION(VFS_I(ip))) { 120 IS_I_VERSION(VFS_I(ip))) {
118 inode_inc_iversion(VFS_I(ip)); 121 ip->i_d.di_changecount = ++VFS_I(ip)->i_version;
119 ip->i_d.di_changecount = VFS_I(ip)->i_version;
120 flags |= XFS_ILOG_CORE; 122 flags |= XFS_ILOG_CORE;
121 } 123 }
122 124
diff --git a/fs/xfs/xfs_trans_resv.c b/fs/xfs/xfs_trans_resv.c
index d53d9f0627a7..2fd59c0dae66 100644
--- a/fs/xfs/xfs_trans_resv.c
+++ b/fs/xfs/xfs_trans_resv.c
@@ -385,8 +385,7 @@ xfs_calc_ifree_reservation(
385 xfs_calc_inode_res(mp, 1) + 385 xfs_calc_inode_res(mp, 1) +
386 xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) + 386 xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
387 xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, 1)) + 387 xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, 1)) +
388 MAX((__uint16_t)XFS_FSB_TO_B(mp, 1), 388 max_t(uint, XFS_FSB_TO_B(mp, 1), XFS_INODE_CLUSTER_SIZE(mp)) +
389 XFS_INODE_CLUSTER_SIZE(mp)) +
390 xfs_calc_buf_res(1, 0) + 389 xfs_calc_buf_res(1, 0) +
391 xfs_calc_buf_res(2 + XFS_IALLOC_BLOCKS(mp) + 390 xfs_calc_buf_res(2 + XFS_IALLOC_BLOCKS(mp) +
392 mp->m_in_maxlevels, 0) + 391 mp->m_in_maxlevels, 0) +
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index d98c67001840..3ea214cff349 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -83,7 +83,9 @@
83 * Should the subsystem abort the loading of an ACPI table if the 83 * Should the subsystem abort the loading of an ACPI table if the
84 * table checksum is incorrect? 84 * table checksum is incorrect?
85 */ 85 */
86#ifndef ACPI_CHECKSUM_ABORT
86#define ACPI_CHECKSUM_ABORT FALSE 87#define ACPI_CHECKSUM_ABORT FALSE
88#endif
87 89
88/* 90/*
89 * Generate a version of ACPICA that only supports "reduced hardware" 91 * Generate a version of ACPICA that only supports "reduced hardware"
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 89c60b0f6408..c602c7718421 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -100,6 +100,7 @@ enum acpi_hotplug_mode {
100struct acpi_hotplug_profile { 100struct acpi_hotplug_profile {
101 struct kobject kobj; 101 struct kobject kobj;
102 bool enabled:1; 102 bool enabled:1;
103 bool ignore:1;
103 enum acpi_hotplug_mode mode; 104 enum acpi_hotplug_mode mode;
104}; 105};
105 106
@@ -431,9 +432,9 @@ static inline acpi_handle acpi_get_child(acpi_handle handle, u64 addr)
431{ 432{
432 return acpi_find_child(handle, addr, false); 433 return acpi_find_child(handle, addr, false);
433} 434}
435void acpi_preset_companion(struct device *dev, acpi_handle parent, u64 addr);
434int acpi_is_root_bridge(acpi_handle); 436int acpi_is_root_bridge(acpi_handle);
435struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); 437struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
436#define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)ACPI_HANDLE(dev))
437 438
438int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state); 439int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state);
439int acpi_disable_wakeup_device_power(struct acpi_device *dev); 440int acpi_disable_wakeup_device_power(struct acpi_device *dev);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index d8f9457755b4..4278aba96503 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
46 46
47/* Current ACPICA subsystem version in YYYYMMDD format */ 47/* Current ACPICA subsystem version in YYYYMMDD format */
48 48
49#define ACPI_CA_VERSION 0x20130927 49#define ACPI_CA_VERSION 0x20131115
50 50
51#include <acpi/acconfig.h> 51#include <acpi/acconfig.h>
52#include <acpi/actypes.h> 52#include <acpi/actypes.h>
diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h
new file mode 100644
index 000000000000..f57eb7b5c23b
--- /dev/null
+++ b/include/asm-generic/simd.h
@@ -0,0 +1,14 @@
1
2#include <linux/hardirq.h>
3
4/*
5 * may_use_simd - whether it is allowable at this time to issue SIMD
6 * instructions or access the SIMD register file
7 *
8 * As architectures typically don't preserve the SIMD register file when
9 * taking an interrupt, !in_interrupt() should be a reasonable default.
10 */
11static __must_check inline bool may_use_simd(void)
12{
13 return !in_interrupt();
14}
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
index 3f21f1b72e45..d3909effd725 100644
--- a/include/asm-generic/word-at-a-time.h
+++ b/include/asm-generic/word-at-a-time.h
@@ -49,4 +49,12 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
49 return (val + c->high_bits) & ~rhs; 49 return (val + c->high_bits) & ~rhs;
50} 50}
51 51
52#ifndef zero_bytemask
53#ifdef CONFIG_64BIT
54#define zero_bytemask(mask) (~0ul << fls64(mask))
55#else
56#define zero_bytemask(mask) (~0ul << fls(mask))
57#endif /* CONFIG_64BIT */
58#endif /* zero_bytemask */
59
52#endif /* _ASM_WORD_AT_A_TIME_H */ 60#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/arch/x86/include/asm/crypto/ablk_helper.h b/include/crypto/ablk_helper.h
index 4f93df50c23e..4f93df50c23e 100644
--- a/arch/x86/include/asm/crypto/ablk_helper.h
+++ b/include/crypto/ablk_helper.h
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 418d270e1806..e73c19e90e38 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -386,5 +386,21 @@ static inline int crypto_requires_sync(u32 type, u32 mask)
386 return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC; 386 return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC;
387} 387}
388 388
389#endif /* _CRYPTO_ALGAPI_H */ 389noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
390
391/**
392 * crypto_memneq - Compare two areas of memory without leaking
393 * timing information.
394 *
395 * @a: One area of memory
396 * @b: Another area of memory
397 * @size: The size of the area.
398 *
399 * Returns 0 when data is equal, 1 otherwise.
400 */
401static inline int crypto_memneq(const void *a, const void *b, size_t size)
402{
403 return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
404}
390 405
406#endif /* _CRYPTO_ALGAPI_H */
diff --git a/include/crypto/authenc.h b/include/crypto/authenc.h
index e47b044929a8..6775059539b5 100644
--- a/include/crypto/authenc.h
+++ b/include/crypto/authenc.h
@@ -23,5 +23,15 @@ struct crypto_authenc_key_param {
23 __be32 enckeylen; 23 __be32 enckeylen;
24}; 24};
25 25
26#endif /* _CRYPTO_AUTHENC_H */ 26struct crypto_authenc_keys {
27 const u8 *authkey;
28 const u8 *enckey;
29
30 unsigned int authkeylen;
31 unsigned int enckeylen;
32};
27 33
34int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
35 unsigned int keylen);
36
37#endif /* _CRYPTO_AUTHENC_H */
diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h
new file mode 100644
index 000000000000..e1e5a3e5dd1b
--- /dev/null
+++ b/include/crypto/hash_info.h
@@ -0,0 +1,40 @@
1/*
2 * Hash Info: Hash algorithms information
3 *
4 * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#ifndef _CRYPTO_HASH_INFO_H
14#define _CRYPTO_HASH_INFO_H
15
16#include <crypto/sha.h>
17#include <crypto/md5.h>
18
19#include <uapi/linux/hash_info.h>
20
21/* not defined in include/crypto/ */
22#define RMD128_DIGEST_SIZE 16
23#define RMD160_DIGEST_SIZE 20
24#define RMD256_DIGEST_SIZE 32
25#define RMD320_DIGEST_SIZE 40
26
27/* not defined in include/crypto/ */
28#define WP512_DIGEST_SIZE 64
29#define WP384_DIGEST_SIZE 48
30#define WP256_DIGEST_SIZE 32
31
32/* not defined in include/crypto/ */
33#define TGR128_DIGEST_SIZE 16
34#define TGR160_DIGEST_SIZE 20
35#define TGR192_DIGEST_SIZE 24
36
37extern const char *const hash_algo_name[HASH_ALGO__LAST];
38extern const int hash_digest_size[HASH_ALGO__LAST];
39
40#endif /* _CRYPTO_HASH_INFO_H */
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index f5b0224c9967..fc09732613ad 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -15,6 +15,7 @@
15#define _LINUX_PUBLIC_KEY_H 15#define _LINUX_PUBLIC_KEY_H
16 16
17#include <linux/mpi.h> 17#include <linux/mpi.h>
18#include <crypto/hash_info.h>
18 19
19enum pkey_algo { 20enum pkey_algo {
20 PKEY_ALGO_DSA, 21 PKEY_ALGO_DSA,
@@ -22,21 +23,11 @@ enum pkey_algo {
22 PKEY_ALGO__LAST 23 PKEY_ALGO__LAST
23}; 24};
24 25
25extern const char *const pkey_algo[PKEY_ALGO__LAST]; 26extern const char *const pkey_algo_name[PKEY_ALGO__LAST];
27extern const struct public_key_algorithm *pkey_algo[PKEY_ALGO__LAST];
26 28
27enum pkey_hash_algo { 29/* asymmetric key implementation supports only up to SHA224 */
28 PKEY_HASH_MD4, 30#define PKEY_HASH__LAST (HASH_ALGO_SHA224 + 1)
29 PKEY_HASH_MD5,
30 PKEY_HASH_SHA1,
31 PKEY_HASH_RIPE_MD_160,
32 PKEY_HASH_SHA256,
33 PKEY_HASH_SHA384,
34 PKEY_HASH_SHA512,
35 PKEY_HASH_SHA224,
36 PKEY_HASH__LAST
37};
38
39extern const char *const pkey_hash_algo[PKEY_HASH__LAST];
40 31
41enum pkey_id_type { 32enum pkey_id_type {
42 PKEY_ID_PGP, /* OpenPGP generated key ID */ 33 PKEY_ID_PGP, /* OpenPGP generated key ID */
@@ -44,7 +35,7 @@ enum pkey_id_type {
44 PKEY_ID_TYPE__LAST 35 PKEY_ID_TYPE__LAST
45}; 36};
46 37
47extern const char *const pkey_id_type[PKEY_ID_TYPE__LAST]; 38extern const char *const pkey_id_type_name[PKEY_ID_TYPE__LAST];
48 39
49/* 40/*
50 * Cryptographic data for the public-key subtype of the asymmetric key type. 41 * Cryptographic data for the public-key subtype of the asymmetric key type.
@@ -59,6 +50,7 @@ struct public_key {
59#define PKEY_CAN_DECRYPT 0x02 50#define PKEY_CAN_DECRYPT 0x02
60#define PKEY_CAN_SIGN 0x04 51#define PKEY_CAN_SIGN 0x04
61#define PKEY_CAN_VERIFY 0x08 52#define PKEY_CAN_VERIFY 0x08
53 enum pkey_algo pkey_algo : 8;
62 enum pkey_id_type id_type : 8; 54 enum pkey_id_type id_type : 8;
63 union { 55 union {
64 MPI mpi[5]; 56 MPI mpi[5];
@@ -88,7 +80,8 @@ struct public_key_signature {
88 u8 *digest; 80 u8 *digest;
89 u8 digest_size; /* Number of bytes in digest */ 81 u8 digest_size; /* Number of bytes in digest */
90 u8 nr_mpi; /* Occupancy of mpi[] */ 82 u8 nr_mpi; /* Occupancy of mpi[] */
91 enum pkey_hash_algo pkey_hash_algo : 8; 83 enum pkey_algo pkey_algo : 8;
84 enum hash_algo pkey_hash_algo : 8;
92 union { 85 union {
93 MPI mpi[2]; 86 MPI mpi[2];
94 struct { 87 struct {
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 13621cc8cf4c..6a626a507b8c 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -36,6 +36,7 @@ static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
36{ 36{
37 sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0); 37 sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
38 sg1[num - 1].page_link &= ~0x02; 38 sg1[num - 1].page_link &= ~0x02;
39 sg1[num - 1].page_link |= 0x01;
39} 40}
40 41
41static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg) 42static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
@@ -43,7 +44,7 @@ static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
43 if (sg_is_last(sg)) 44 if (sg_is_last(sg))
44 return NULL; 45 return NULL;
45 46
46 return (++sg)->length ? sg : (void *)sg_page(sg); 47 return (++sg)->length ? sg : sg_chain_ptr(sg);
47} 48}
48 49
49static inline void scatterwalk_crypto_chain(struct scatterlist *head, 50static inline void scatterwalk_crypto_chain(struct scatterlist *head,
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 751eaffbf0d5..ee127ec33c60 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -169,6 +169,7 @@ struct ttm_tt;
169 * @offset: The current GPU offset, which can have different meanings 169 * @offset: The current GPU offset, which can have different meanings
170 * depending on the memory type. For SYSTEM type memory, it should be 0. 170 * depending on the memory type. For SYSTEM type memory, it should be 0.
171 * @cur_placement: Hint of current placement. 171 * @cur_placement: Hint of current placement.
172 * @wu_mutex: Wait unreserved mutex.
172 * 173 *
173 * Base class for TTM buffer object, that deals with data placement and CPU 174 * Base class for TTM buffer object, that deals with data placement and CPU
174 * mappings. GPU mappings are really up to the driver, but for simpler GPUs 175 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
@@ -250,6 +251,7 @@ struct ttm_buffer_object {
250 251
251 struct reservation_object *resv; 252 struct reservation_object *resv;
252 struct reservation_object ttm_resv; 253 struct reservation_object ttm_resv;
254 struct mutex wu_mutex;
253}; 255};
254 256
255/** 257/**
@@ -702,5 +704,5 @@ extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
702 size_t count, loff_t *f_pos, bool write); 704 size_t count, loff_t *f_pos, bool write);
703 705
704extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev); 706extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
705 707extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
706#endif 708#endif
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index ec8a1d306510..16db7d01a336 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -70,7 +70,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
70/** 70/**
71 * function ttm_eu_reserve_buffers 71 * function ttm_eu_reserve_buffers
72 * 72 *
73 * @ticket: [out] ww_acquire_ctx returned by call. 73 * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only
74 * non-blocking reserves should be tried.
74 * @list: thread private list of ttm_validate_buffer structs. 75 * @list: thread private list of ttm_validate_buffer structs.
75 * 76 *
76 * Tries to reserve bos pointed to by the list entries for validation. 77 * Tries to reserve bos pointed to by the list entries for validation.
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index fc0cf0649901..58b029894eb3 100644
--- a/include/drm/ttm/ttm_object.h
+++ b/include/drm/ttm/ttm_object.h
@@ -41,6 +41,7 @@
41#include <drm/drm_hashtab.h> 41#include <drm/drm_hashtab.h>
42#include <linux/kref.h> 42#include <linux/kref.h>
43#include <linux/rcupdate.h> 43#include <linux/rcupdate.h>
44#include <linux/dma-buf.h>
44#include <ttm/ttm_memory.h> 45#include <ttm/ttm_memory.h>
45 46
46/** 47/**
@@ -77,6 +78,7 @@ enum ttm_object_type {
77 ttm_fence_type, 78 ttm_fence_type,
78 ttm_buffer_type, 79 ttm_buffer_type,
79 ttm_lock_type, 80 ttm_lock_type,
81 ttm_prime_type,
80 ttm_driver_type0 = 256, 82 ttm_driver_type0 = 256,
81 ttm_driver_type1, 83 ttm_driver_type1,
82 ttm_driver_type2, 84 ttm_driver_type2,
@@ -132,6 +134,30 @@ struct ttm_base_object {
132 enum ttm_ref_type ref_type); 134 enum ttm_ref_type ref_type);
133}; 135};
134 136
137
138/**
139 * struct ttm_prime_object - Modified base object that is prime-aware
140 *
141 * @base: struct ttm_base_object that we derive from
142 * @mutex: Mutex protecting the @dma_buf member.
143 * @size: Size of the dma_buf associated with this object
144 * @real_type: Type of the underlying object. Needed since we're setting
145 * the value of @base::object_type to ttm_prime_type
146 * @dma_buf: Non ref-coutned pointer to a struct dma_buf created from this
147 * object.
148 * @refcount_release: The underlying object's release method. Needed since
149 * we set @base::refcount_release to our own release method.
150 */
151
152struct ttm_prime_object {
153 struct ttm_base_object base;
154 struct mutex mutex;
155 size_t size;
156 enum ttm_object_type real_type;
157 struct dma_buf *dma_buf;
158 void (*refcount_release) (struct ttm_base_object **);
159};
160
135/** 161/**
136 * ttm_base_object_init 162 * ttm_base_object_init
137 * 163 *
@@ -248,14 +274,18 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
248/** 274/**
249 * ttm_object device init - initialize a struct ttm_object_device 275 * ttm_object device init - initialize a struct ttm_object_device
250 * 276 *
277 * @mem_glob: struct ttm_mem_global for memory accounting.
251 * @hash_order: Order of hash table used to hash the base objects. 278 * @hash_order: Order of hash table used to hash the base objects.
279 * @ops: DMA buf ops for prime objects of this device.
252 * 280 *
253 * This function is typically called on device initialization to prepare 281 * This function is typically called on device initialization to prepare
254 * data structures needed for ttm base and ref objects. 282 * data structures needed for ttm base and ref objects.
255 */ 283 */
256 284
257extern struct ttm_object_device *ttm_object_device_init 285extern struct ttm_object_device *
258 (struct ttm_mem_global *mem_glob, unsigned int hash_order); 286ttm_object_device_init(struct ttm_mem_global *mem_glob,
287 unsigned int hash_order,
288 const struct dma_buf_ops *ops);
259 289
260/** 290/**
261 * ttm_object_device_release - release data held by a ttm_object_device 291 * ttm_object_device_release - release data held by a ttm_object_device
@@ -272,4 +302,31 @@ extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
272 302
273#define ttm_base_object_kfree(__object, __base)\ 303#define ttm_base_object_kfree(__object, __base)\
274 kfree_rcu(__object, __base.rhead) 304 kfree_rcu(__object, __base.rhead)
305
306extern int ttm_prime_object_init(struct ttm_object_file *tfile,
307 size_t size,
308 struct ttm_prime_object *prime,
309 bool shareable,
310 enum ttm_object_type type,
311 void (*refcount_release)
312 (struct ttm_base_object **),
313 void (*ref_obj_release)
314 (struct ttm_base_object *,
315 enum ttm_ref_type ref_type));
316
317static inline enum ttm_object_type
318ttm_base_object_type(struct ttm_base_object *base)
319{
320 return (base->object_type == ttm_prime_type) ?
321 container_of(base, struct ttm_prime_object, base)->real_type :
322 base->object_type;
323}
324extern int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
325 int fd, u32 *handle);
326extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
327 uint32_t handle, uint32_t flags,
328 int *prime_fd);
329
330#define ttm_prime_object_kfree(__obj, __prime) \
331 kfree_rcu(__obj, __prime.base.rhead)
275#endif 332#endif
diff --git a/include/keys/big_key-type.h b/include/keys/big_key-type.h
new file mode 100644
index 000000000000..d69bc8af3292
--- /dev/null
+++ b/include/keys/big_key-type.h
@@ -0,0 +1,25 @@
1/* Big capacity key type.
2 *
3 * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _KEYS_BIG_KEY_TYPE_H
13#define _KEYS_BIG_KEY_TYPE_H
14
15#include <linux/key-type.h>
16
17extern struct key_type key_type_big_key;
18
19extern int big_key_instantiate(struct key *key, struct key_preparsed_payload *prep);
20extern void big_key_revoke(struct key *key);
21extern void big_key_destroy(struct key *key);
22extern void big_key_describe(const struct key *big_key, struct seq_file *m);
23extern long big_key_read(const struct key *key, char __user *buffer, size_t buflen);
24
25#endif /* _KEYS_BIG_KEY_TYPE_H */
diff --git a/include/keys/keyring-type.h b/include/keys/keyring-type.h
index cf49159b0e3a..fca5c62340a4 100644
--- a/include/keys/keyring-type.h
+++ b/include/keys/keyring-type.h
@@ -1,6 +1,6 @@
1/* Keyring key type 1/* Keyring key type
2 * 2 *
3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2008, 2013 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
@@ -13,19 +13,6 @@
13#define _KEYS_KEYRING_TYPE_H 13#define _KEYS_KEYRING_TYPE_H
14 14
15#include <linux/key.h> 15#include <linux/key.h>
16#include <linux/rcupdate.h> 16#include <linux/assoc_array.h>
17
18/*
19 * the keyring payload contains a list of the keys to which the keyring is
20 * subscribed
21 */
22struct keyring_list {
23 struct rcu_head rcu; /* RCU deletion hook */
24 unsigned short maxkeys; /* max keys this list can hold */
25 unsigned short nkeys; /* number of keys currently held */
26 unsigned short delkey; /* key to be unlinked by RCU */
27 struct key __rcu *keys[0];
28};
29
30 17
31#endif /* _KEYS_KEYRING_TYPE_H */ 18#endif /* _KEYS_KEYRING_TYPE_H */
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
new file mode 100644
index 000000000000..8dabc399bd1d
--- /dev/null
+++ b/include/keys/system_keyring.h
@@ -0,0 +1,23 @@
1/* System keyring containing trusted public keys.
2 *
3 * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#ifndef _KEYS_SYSTEM_KEYRING_H
13#define _KEYS_SYSTEM_KEYRING_H
14
15#ifdef CONFIG_SYSTEM_TRUSTED_KEYRING
16
17#include <linux/key.h>
18
19extern struct key *system_trusted_keyring;
20
21#endif
22
23#endif /* _KEYS_SYSTEM_KEYRING_H */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index b0972c4ce81c..d9099b15b472 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -44,6 +44,20 @@
44#include <acpi/acpi_numa.h> 44#include <acpi/acpi_numa.h>
45#include <asm/acpi.h> 45#include <asm/acpi.h>
46 46
47static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
48{
49 return adev ? adev->handle : NULL;
50}
51
52#define ACPI_COMPANION(dev) ((dev)->acpi_node.companion)
53#define ACPI_COMPANION_SET(dev, adev) ACPI_COMPANION(dev) = (adev)
54#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
55
56static inline const char *acpi_dev_name(struct acpi_device *adev)
57{
58 return dev_name(&adev->dev);
59}
60
47enum acpi_irq_model_id { 61enum acpi_irq_model_id {
48 ACPI_IRQ_MODEL_PIC = 0, 62 ACPI_IRQ_MODEL_PIC = 0,
49 ACPI_IRQ_MODEL_IOAPIC, 63 ACPI_IRQ_MODEL_IOAPIC,
@@ -401,6 +415,15 @@ static inline bool acpi_driver_match_device(struct device *dev,
401 415
402#define acpi_disabled 1 416#define acpi_disabled 1
403 417
418#define ACPI_COMPANION(dev) (NULL)
419#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
420#define ACPI_HANDLE(dev) (NULL)
421
422static inline const char *acpi_dev_name(struct acpi_device *adev)
423{
424 return NULL;
425}
426
404static inline void acpi_early_init(void) { } 427static inline void acpi_early_init(void) { }
405 428
406static inline int early_acpi_boot_init(void) 429static inline int early_acpi_boot_init(void)
diff --git a/include/linux/assoc_array.h b/include/linux/assoc_array.h
new file mode 100644
index 000000000000..a89df3be1686
--- /dev/null
+++ b/include/linux/assoc_array.h
@@ -0,0 +1,92 @@
1/* Generic associative array implementation.
2 *
3 * See Documentation/assoc_array.txt for information.
4 *
5 * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
6 * Written by David Howells (dhowells@redhat.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public Licence
10 * as published by the Free Software Foundation; either version
11 * 2 of the Licence, or (at your option) any later version.
12 */
13
14#ifndef _LINUX_ASSOC_ARRAY_H
15#define _LINUX_ASSOC_ARRAY_H
16
17#ifdef CONFIG_ASSOCIATIVE_ARRAY
18
19#include <linux/types.h>
20
21#define ASSOC_ARRAY_KEY_CHUNK_SIZE BITS_PER_LONG /* Key data retrieved in chunks of this size */
22
23/*
24 * Generic associative array.
25 */
26struct assoc_array {
27 struct assoc_array_ptr *root; /* The node at the root of the tree */
28 unsigned long nr_leaves_on_tree;
29};
30
31/*
32 * Operations on objects and index keys for use by array manipulation routines.
33 */
34struct assoc_array_ops {
35 /* Method to get a chunk of an index key from caller-supplied data */
36 unsigned long (*get_key_chunk)(const void *index_key, int level);
37
38 /* Method to get a piece of an object's index key */
39 unsigned long (*get_object_key_chunk)(const void *object, int level);
40
41 /* Is this the object we're looking for? */
42 bool (*compare_object)(const void *object, const void *index_key);
43
44 /* How different is an object from an index key, to a bit position in
45 * their keys? (or -1 if they're the same)
46 */
47 int (*diff_objects)(const void *object, const void *index_key);
48
49 /* Method to free an object. */
50 void (*free_object)(void *object);
51};
52
53/*
54 * Access and manipulation functions.
55 */
56struct assoc_array_edit;
57
58static inline void assoc_array_init(struct assoc_array *array)
59{
60 array->root = NULL;
61 array->nr_leaves_on_tree = 0;
62}
63
64extern int assoc_array_iterate(const struct assoc_array *array,
65 int (*iterator)(const void *object,
66 void *iterator_data),
67 void *iterator_data);
68extern void *assoc_array_find(const struct assoc_array *array,
69 const struct assoc_array_ops *ops,
70 const void *index_key);
71extern void assoc_array_destroy(struct assoc_array *array,
72 const struct assoc_array_ops *ops);
73extern struct assoc_array_edit *assoc_array_insert(struct assoc_array *array,
74 const struct assoc_array_ops *ops,
75 const void *index_key,
76 void *object);
77extern void assoc_array_insert_set_object(struct assoc_array_edit *edit,
78 void *object);
79extern struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
80 const struct assoc_array_ops *ops,
81 const void *index_key);
82extern struct assoc_array_edit *assoc_array_clear(struct assoc_array *array,
83 const struct assoc_array_ops *ops);
84extern void assoc_array_apply_edit(struct assoc_array_edit *edit);
85extern void assoc_array_cancel_edit(struct assoc_array_edit *edit);
86extern int assoc_array_gc(struct assoc_array *array,
87 const struct assoc_array_ops *ops,
88 bool (*iterator)(void *object, void *iterator_data),
89 void *iterator_data);
90
91#endif /* CONFIG_ASSOCIATIVE_ARRAY */
92#endif /* _LINUX_ASSOC_ARRAY_H */
diff --git a/include/linux/assoc_array_priv.h b/include/linux/assoc_array_priv.h
new file mode 100644
index 000000000000..711275e6681c
--- /dev/null
+++ b/include/linux/assoc_array_priv.h
@@ -0,0 +1,182 @@
1/* Private definitions for the generic associative array implementation.
2 *
3 * See Documentation/assoc_array.txt for information.
4 *
5 * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
6 * Written by David Howells (dhowells@redhat.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public Licence
10 * as published by the Free Software Foundation; either version
11 * 2 of the Licence, or (at your option) any later version.
12 */
13
14#ifndef _LINUX_ASSOC_ARRAY_PRIV_H
15#define _LINUX_ASSOC_ARRAY_PRIV_H
16
17#ifdef CONFIG_ASSOCIATIVE_ARRAY
18
19#include <linux/assoc_array.h>
20
21#define ASSOC_ARRAY_FAN_OUT 16 /* Number of slots per node */
22#define ASSOC_ARRAY_FAN_MASK (ASSOC_ARRAY_FAN_OUT - 1)
23#define ASSOC_ARRAY_LEVEL_STEP (ilog2(ASSOC_ARRAY_FAN_OUT))
24#define ASSOC_ARRAY_LEVEL_STEP_MASK (ASSOC_ARRAY_LEVEL_STEP - 1)
25#define ASSOC_ARRAY_KEY_CHUNK_MASK (ASSOC_ARRAY_KEY_CHUNK_SIZE - 1)
26#define ASSOC_ARRAY_KEY_CHUNK_SHIFT (ilog2(BITS_PER_LONG))
27
28/*
29 * Undefined type representing a pointer with type information in the bottom
30 * two bits.
31 */
32struct assoc_array_ptr;
33
34/*
35 * An N-way node in the tree.
36 *
37 * Each slot contains one of four things:
38 *
39 * (1) Nothing (NULL).
40 *
41 * (2) A leaf object (pointer types 0).
42 *
43 * (3) A next-level node (pointer type 1, subtype 0).
44 *
45 * (4) A shortcut (pointer type 1, subtype 1).
46 *
47 * The tree is optimised for search-by-ID, but permits reasonable iteration
48 * also.
49 *
50 * The tree is navigated by constructing an index key consisting of an array of
51 * segments, where each segment is ilog2(ASSOC_ARRAY_FAN_OUT) bits in size.
52 *
53 * The segments correspond to levels of the tree (the first segment is used at
54 * level 0, the second at level 1, etc.).
55 */
56struct assoc_array_node {
57 struct assoc_array_ptr *back_pointer;
58 u8 parent_slot;
59 struct assoc_array_ptr *slots[ASSOC_ARRAY_FAN_OUT];
60 unsigned long nr_leaves_on_branch;
61};
62
63/*
64 * A shortcut through the index space out to where a collection of nodes/leaves
65 * with the same IDs live.
66 */
67struct assoc_array_shortcut {
68 struct assoc_array_ptr *back_pointer;
69 int parent_slot;
70 int skip_to_level;
71 struct assoc_array_ptr *next_node;
72 unsigned long index_key[];
73};
74
75/*
76 * Preallocation cache.
77 */
78struct assoc_array_edit {
79 struct rcu_head rcu;
80 struct assoc_array *array;
81 const struct assoc_array_ops *ops;
82 const struct assoc_array_ops *ops_for_excised_subtree;
83 struct assoc_array_ptr *leaf;
84 struct assoc_array_ptr **leaf_p;
85 struct assoc_array_ptr *dead_leaf;
86 struct assoc_array_ptr *new_meta[3];
87 struct assoc_array_ptr *excised_meta[1];
88 struct assoc_array_ptr *excised_subtree;
89 struct assoc_array_ptr **set_backpointers[ASSOC_ARRAY_FAN_OUT];
90 struct assoc_array_ptr *set_backpointers_to;
91 struct assoc_array_node *adjust_count_on;
92 long adjust_count_by;
93 struct {
94 struct assoc_array_ptr **ptr;
95 struct assoc_array_ptr *to;
96 } set[2];
97 struct {
98 u8 *p;
99 u8 to;
100 } set_parent_slot[1];
101 u8 segment_cache[ASSOC_ARRAY_FAN_OUT + 1];
102};
103
104/*
105 * Internal tree member pointers are marked in the bottom one or two bits to
106 * indicate what type they are so that we don't have to look behind every
107 * pointer to see what it points to.
108 *
109 * We provide functions to test type annotations and to create and translate
110 * the annotated pointers.
111 */
112#define ASSOC_ARRAY_PTR_TYPE_MASK 0x1UL
113#define ASSOC_ARRAY_PTR_LEAF_TYPE 0x0UL /* Points to leaf (or nowhere) */
114#define ASSOC_ARRAY_PTR_META_TYPE 0x1UL /* Points to node or shortcut */
115#define ASSOC_ARRAY_PTR_SUBTYPE_MASK 0x2UL
116#define ASSOC_ARRAY_PTR_NODE_SUBTYPE 0x0UL
117#define ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE 0x2UL
118
119static inline bool assoc_array_ptr_is_meta(const struct assoc_array_ptr *x)
120{
121 return (unsigned long)x & ASSOC_ARRAY_PTR_TYPE_MASK;
122}
123static inline bool assoc_array_ptr_is_leaf(const struct assoc_array_ptr *x)
124{
125 return !assoc_array_ptr_is_meta(x);
126}
127static inline bool assoc_array_ptr_is_shortcut(const struct assoc_array_ptr *x)
128{
129 return (unsigned long)x & ASSOC_ARRAY_PTR_SUBTYPE_MASK;
130}
131static inline bool assoc_array_ptr_is_node(const struct assoc_array_ptr *x)
132{
133 return !assoc_array_ptr_is_shortcut(x);
134}
135
136static inline void *assoc_array_ptr_to_leaf(const struct assoc_array_ptr *x)
137{
138 return (void *)((unsigned long)x & ~ASSOC_ARRAY_PTR_TYPE_MASK);
139}
140
141static inline
142unsigned long __assoc_array_ptr_to_meta(const struct assoc_array_ptr *x)
143{
144 return (unsigned long)x &
145 ~(ASSOC_ARRAY_PTR_SUBTYPE_MASK | ASSOC_ARRAY_PTR_TYPE_MASK);
146}
147static inline
148struct assoc_array_node *assoc_array_ptr_to_node(const struct assoc_array_ptr *x)
149{
150 return (struct assoc_array_node *)__assoc_array_ptr_to_meta(x);
151}
152static inline
153struct assoc_array_shortcut *assoc_array_ptr_to_shortcut(const struct assoc_array_ptr *x)
154{
155 return (struct assoc_array_shortcut *)__assoc_array_ptr_to_meta(x);
156}
157
158static inline
159struct assoc_array_ptr *__assoc_array_x_to_ptr(const void *p, unsigned long t)
160{
161 return (struct assoc_array_ptr *)((unsigned long)p | t);
162}
163static inline
164struct assoc_array_ptr *assoc_array_leaf_to_ptr(const void *p)
165{
166 return __assoc_array_x_to_ptr(p, ASSOC_ARRAY_PTR_LEAF_TYPE);
167}
168static inline
169struct assoc_array_ptr *assoc_array_node_to_ptr(const struct assoc_array_node *p)
170{
171 return __assoc_array_x_to_ptr(
172 p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_NODE_SUBTYPE);
173}
174static inline
175struct assoc_array_ptr *assoc_array_shortcut_to_ptr(const struct assoc_array_shortcut *p)
176{
177 return __assoc_array_x_to_ptr(
178 p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE);
179}
180
181#endif /* CONFIG_ASSOCIATIVE_ARRAY */
182#endif /* _LINUX_ASSOC_ARRAY_PRIV_H */
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 729a4d165bcc..a40641954c29 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -73,6 +73,8 @@ struct audit_field {
73 void *lsm_rule; 73 void *lsm_rule;
74}; 74};
75 75
76extern int is_audit_feature_set(int which);
77
76extern int __init audit_register_class(int class, unsigned *list); 78extern int __init audit_register_class(int class, unsigned *list);
77extern int audit_classify_syscall(int abi, unsigned syscall); 79extern int audit_classify_syscall(int abi, unsigned syscall);
78extern int audit_classify_arch(int arch); 80extern int audit_classify_arch(int arch);
@@ -207,7 +209,7 @@ static inline int audit_get_sessionid(struct task_struct *tsk)
207 209
208extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); 210extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
209extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); 211extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
210extern int __audit_bprm(struct linux_binprm *bprm); 212extern void __audit_bprm(struct linux_binprm *bprm);
211extern int __audit_socketcall(int nargs, unsigned long *args); 213extern int __audit_socketcall(int nargs, unsigned long *args);
212extern int __audit_sockaddr(int len, void *addr); 214extern int __audit_sockaddr(int len, void *addr);
213extern void __audit_fd_pair(int fd1, int fd2); 215extern void __audit_fd_pair(int fd1, int fd2);
@@ -236,11 +238,10 @@ static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid
236 if (unlikely(!audit_dummy_context())) 238 if (unlikely(!audit_dummy_context()))
237 __audit_ipc_set_perm(qbytes, uid, gid, mode); 239 __audit_ipc_set_perm(qbytes, uid, gid, mode);
238} 240}
239static inline int audit_bprm(struct linux_binprm *bprm) 241static inline void audit_bprm(struct linux_binprm *bprm)
240{ 242{
241 if (unlikely(!audit_dummy_context())) 243 if (unlikely(!audit_dummy_context()))
242 return __audit_bprm(bprm); 244 __audit_bprm(bprm);
243 return 0;
244} 245}
245static inline int audit_socketcall(int nargs, unsigned long *args) 246static inline int audit_socketcall(int nargs, unsigned long *args)
246{ 247{
@@ -367,10 +368,8 @@ static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
367static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, 368static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
368 gid_t gid, umode_t mode) 369 gid_t gid, umode_t mode)
369{ } 370{ }
370static inline int audit_bprm(struct linux_binprm *bprm) 371static inline void audit_bprm(struct linux_binprm *bprm)
371{ 372{ }
372 return 0;
373}
374static inline int audit_socketcall(int nargs, unsigned long *args) 373static inline int audit_socketcall(int nargs, unsigned long *args)
375{ 374{
376 return 0; 375 return 0;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f26ec20f6354..1b135d49b279 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -505,6 +505,9 @@ struct request_queue {
505 (1 << QUEUE_FLAG_SAME_COMP) | \ 505 (1 << QUEUE_FLAG_SAME_COMP) | \
506 (1 << QUEUE_FLAG_ADD_RANDOM)) 506 (1 << QUEUE_FLAG_ADD_RANDOM))
507 507
508#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
509 (1 << QUEUE_FLAG_SAME_COMP))
510
508static inline void queue_lockdep_assert_held(struct request_queue *q) 511static inline void queue_lockdep_assert_held(struct request_queue *q)
509{ 512{
510 if (q->queue_lock) 513 if (q->queue_lock)
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index 973ce10c40b6..dc1bd3dcf11f 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -28,8 +28,6 @@
28 28
29#endif 29#endif
30 30
31#define uninitialized_var(x) x
32
33#ifndef __HAVE_BUILTIN_BSWAP16__ 31#ifndef __HAVE_BUILTIN_BSWAP16__
34/* icc has this, but it's called _bswap16 */ 32/* icc has this, but it's called _bswap16 */
35#define __HAVE_BUILTIN_BSWAP16__ 33#define __HAVE_BUILTIN_BSWAP16__
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 57e87e749a48..bf72e9ac6de0 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -29,8 +29,10 @@ struct vfsmount;
29/* The hash is always the low bits of hash_len */ 29/* The hash is always the low bits of hash_len */
30#ifdef __LITTLE_ENDIAN 30#ifdef __LITTLE_ENDIAN
31 #define HASH_LEN_DECLARE u32 hash; u32 len; 31 #define HASH_LEN_DECLARE u32 hash; u32 len;
32 #define bytemask_from_count(cnt) (~(~0ul << (cnt)*8))
32#else 33#else
33 #define HASH_LEN_DECLARE u32 len; u32 hash; 34 #define HASH_LEN_DECLARE u32 len; u32 hash;
35 #define bytemask_from_count(cnt) (~(~0ul >> (cnt)*8))
34#endif 36#endif
35 37
36/* 38/*
diff --git a/include/linux/device.h b/include/linux/device.h
index b025925df7f7..952b01033c32 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -644,9 +644,11 @@ struct device_dma_parameters {
644 unsigned long segment_boundary_mask; 644 unsigned long segment_boundary_mask;
645}; 645};
646 646
647struct acpi_device;
648
647struct acpi_dev_node { 649struct acpi_dev_node {
648#ifdef CONFIG_ACPI 650#ifdef CONFIG_ACPI
649 void *handle; 651 struct acpi_device *companion;
650#endif 652#endif
651}; 653};
652 654
@@ -790,14 +792,6 @@ static inline struct device *kobj_to_dev(struct kobject *kobj)
790 return container_of(kobj, struct device, kobj); 792 return container_of(kobj, struct device, kobj);
791} 793}
792 794
793#ifdef CONFIG_ACPI
794#define ACPI_HANDLE(dev) ((dev)->acpi_node.handle)
795#define ACPI_HANDLE_SET(dev, _handle_) (dev)->acpi_node.handle = (_handle_)
796#else
797#define ACPI_HANDLE(dev) (NULL)
798#define ACPI_HANDLE_SET(dev, _handle_) do { } while (0)
799#endif
800
801/* Get the wakeup routines, which depend on struct device */ 795/* Get the wakeup routines, which depend on struct device */
802#include <linux/pm_wakeup.h> 796#include <linux/pm_wakeup.h>
803 797
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 0bc727534108..41cf0c399288 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -45,13 +45,13 @@ static inline int dma_submit_error(dma_cookie_t cookie)
45 45
46/** 46/**
47 * enum dma_status - DMA transaction status 47 * enum dma_status - DMA transaction status
48 * @DMA_SUCCESS: transaction completed successfully 48 * @DMA_COMPLETE: transaction completed
49 * @DMA_IN_PROGRESS: transaction not yet processed 49 * @DMA_IN_PROGRESS: transaction not yet processed
50 * @DMA_PAUSED: transaction is paused 50 * @DMA_PAUSED: transaction is paused
51 * @DMA_ERROR: transaction failed 51 * @DMA_ERROR: transaction failed
52 */ 52 */
53enum dma_status { 53enum dma_status {
54 DMA_SUCCESS, 54 DMA_COMPLETE,
55 DMA_IN_PROGRESS, 55 DMA_IN_PROGRESS,
56 DMA_PAUSED, 56 DMA_PAUSED,
57 DMA_ERROR, 57 DMA_ERROR,
@@ -171,12 +171,6 @@ struct dma_interleaved_template {
171 * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client 171 * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
172 * acknowledges receipt, i.e. has has a chance to establish any dependency 172 * acknowledges receipt, i.e. has has a chance to establish any dependency
173 * chains 173 * chains
174 * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
175 * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
176 * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
177 * (if not set, do the source dma-unmapping as page)
178 * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
179 * (if not set, do the destination dma-unmapping as page)
180 * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q 174 * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
181 * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P 175 * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
182 * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as 176 * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
@@ -188,14 +182,10 @@ struct dma_interleaved_template {
188enum dma_ctrl_flags { 182enum dma_ctrl_flags {
189 DMA_PREP_INTERRUPT = (1 << 0), 183 DMA_PREP_INTERRUPT = (1 << 0),
190 DMA_CTRL_ACK = (1 << 1), 184 DMA_CTRL_ACK = (1 << 1),
191 DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), 185 DMA_PREP_PQ_DISABLE_P = (1 << 2),
192 DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), 186 DMA_PREP_PQ_DISABLE_Q = (1 << 3),
193 DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4), 187 DMA_PREP_CONTINUE = (1 << 4),
194 DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5), 188 DMA_PREP_FENCE = (1 << 5),
195 DMA_PREP_PQ_DISABLE_P = (1 << 6),
196 DMA_PREP_PQ_DISABLE_Q = (1 << 7),
197 DMA_PREP_CONTINUE = (1 << 8),
198 DMA_PREP_FENCE = (1 << 9),
199}; 189};
200 190
201/** 191/**
@@ -413,6 +403,17 @@ void dma_chan_cleanup(struct kref *kref);
413typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); 403typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
414 404
415typedef void (*dma_async_tx_callback)(void *dma_async_param); 405typedef void (*dma_async_tx_callback)(void *dma_async_param);
406
407struct dmaengine_unmap_data {
408 u8 to_cnt;
409 u8 from_cnt;
410 u8 bidi_cnt;
411 struct device *dev;
412 struct kref kref;
413 size_t len;
414 dma_addr_t addr[0];
415};
416
416/** 417/**
417 * struct dma_async_tx_descriptor - async transaction descriptor 418 * struct dma_async_tx_descriptor - async transaction descriptor
418 * ---dma generic offload fields--- 419 * ---dma generic offload fields---
@@ -438,6 +439,7 @@ struct dma_async_tx_descriptor {
438 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); 439 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
439 dma_async_tx_callback callback; 440 dma_async_tx_callback callback;
440 void *callback_param; 441 void *callback_param;
442 struct dmaengine_unmap_data *unmap;
441#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH 443#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
442 struct dma_async_tx_descriptor *next; 444 struct dma_async_tx_descriptor *next;
443 struct dma_async_tx_descriptor *parent; 445 struct dma_async_tx_descriptor *parent;
@@ -445,6 +447,40 @@ struct dma_async_tx_descriptor {
445#endif 447#endif
446}; 448};
447 449
450#ifdef CONFIG_DMA_ENGINE
451static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
452 struct dmaengine_unmap_data *unmap)
453{
454 kref_get(&unmap->kref);
455 tx->unmap = unmap;
456}
457
458struct dmaengine_unmap_data *
459dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
460void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
461#else
462static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
463 struct dmaengine_unmap_data *unmap)
464{
465}
466static inline struct dmaengine_unmap_data *
467dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
468{
469 return NULL;
470}
471static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
472{
473}
474#endif
475
476static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
477{
478 if (tx->unmap) {
479 dmaengine_unmap_put(tx->unmap);
480 tx->unmap = NULL;
481 }
482}
483
448#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH 484#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
449static inline void txd_lock(struct dma_async_tx_descriptor *txd) 485static inline void txd_lock(struct dma_async_tx_descriptor *txd)
450{ 486{
@@ -979,10 +1015,10 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
979{ 1015{
980 if (last_complete <= last_used) { 1016 if (last_complete <= last_used) {
981 if ((cookie <= last_complete) || (cookie > last_used)) 1017 if ((cookie <= last_complete) || (cookie > last_used))
982 return DMA_SUCCESS; 1018 return DMA_COMPLETE;
983 } else { 1019 } else {
984 if ((cookie <= last_complete) && (cookie > last_used)) 1020 if ((cookie <= last_complete) && (cookie > last_used))
985 return DMA_SUCCESS; 1021 return DMA_COMPLETE;
986 } 1022 }
987 return DMA_IN_PROGRESS; 1023 return DMA_IN_PROGRESS;
988} 1024}
@@ -1013,11 +1049,11 @@ static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_typ
1013} 1049}
1014static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) 1050static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
1015{ 1051{
1016 return DMA_SUCCESS; 1052 return DMA_COMPLETE;
1017} 1053}
1018static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) 1054static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1019{ 1055{
1020 return DMA_SUCCESS; 1056 return DMA_COMPLETE;
1021} 1057}
1022static inline void dma_issue_pending_all(void) 1058static inline void dma_issue_pending_all(void)
1023{ 1059{
diff --git a/include/linux/efi.h b/include/linux/efi.h
index bc5687d0f315..11ce6784a196 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -801,6 +801,8 @@ struct efivar_entry {
801 struct efi_variable var; 801 struct efi_variable var;
802 struct list_head list; 802 struct list_head list;
803 struct kobject kobj; 803 struct kobject kobj;
804 bool scanning;
805 bool deleting;
804}; 806};
805 807
806 808
@@ -866,6 +868,8 @@ void efivar_run_worker(void);
866#if defined(CONFIG_EFI_VARS) || defined(CONFIG_EFI_VARS_MODULE) 868#if defined(CONFIG_EFI_VARS) || defined(CONFIG_EFI_VARS_MODULE)
867int efivars_sysfs_init(void); 869int efivars_sysfs_init(void);
868 870
871#define EFIVARS_DATA_SIZE_MAX 1024
872
869#endif /* CONFIG_EFI_VARS */ 873#endif /* CONFIG_EFI_VARS */
870 874
871#endif /* _LINUX_EFI_H */ 875#endif /* _LINUX_EFI_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index bf5d574ebdf4..121f11f001c0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2622,7 +2622,9 @@ extern int simple_write_begin(struct file *file, struct address_space *mapping,
2622extern int simple_write_end(struct file *file, struct address_space *mapping, 2622extern int simple_write_end(struct file *file, struct address_space *mapping,
2623 loff_t pos, unsigned len, unsigned copied, 2623 loff_t pos, unsigned len, unsigned copied,
2624 struct page *page, void *fsdata); 2624 struct page *page, void *fsdata);
2625extern int always_delete_dentry(const struct dentry *);
2625extern struct inode *alloc_anon_inode(struct super_block *); 2626extern struct inode *alloc_anon_inode(struct super_block *);
2627extern const struct dentry_operations simple_dentry_operations;
2626 2628
2627extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags); 2629extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
2628extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *); 2630extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
index 023bc346b877..c0894dd8827b 100644
--- a/include/linux/genl_magic_func.h
+++ b/include/linux/genl_magic_func.h
@@ -273,49 +273,40 @@ static struct genl_family ZZZ_genl_family __read_mostly = {
273 * Magic: define multicast groups 273 * Magic: define multicast groups
274 * Magic: define multicast group registration helper 274 * Magic: define multicast group registration helper
275 */ 275 */
276#define ZZZ_genl_mcgrps CONCAT_(GENL_MAGIC_FAMILY, _genl_mcgrps)
277static const struct genl_multicast_group ZZZ_genl_mcgrps[] = {
278#undef GENL_mc_group
279#define GENL_mc_group(group) { .name = #group, },
280#include GENL_MAGIC_INCLUDE_FILE
281};
282
283enum CONCAT_(GENL_MAGIC_FAMILY, group_ids) {
284#undef GENL_mc_group
285#define GENL_mc_group(group) CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group),
286#include GENL_MAGIC_INCLUDE_FILE
287};
288
276#undef GENL_mc_group 289#undef GENL_mc_group
277#define GENL_mc_group(group) \ 290#define GENL_mc_group(group) \
278static struct genl_multicast_group \
279CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group) __read_mostly = { \
280 .name = #group, \
281}; \
282static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \ 291static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \
283 struct sk_buff *skb, gfp_t flags) \ 292 struct sk_buff *skb, gfp_t flags) \
284{ \ 293{ \
285 unsigned int group_id = \ 294 unsigned int group_id = \
286 CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group).id; \ 295 CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group); \
287 if (!group_id) \ 296 return genlmsg_multicast(&ZZZ_genl_family, skb, 0, \
288 return -EINVAL; \ 297 group_id, flags); \
289 return genlmsg_multicast(skb, 0, group_id, flags); \
290} 298}
291 299
292#include GENL_MAGIC_INCLUDE_FILE 300#include GENL_MAGIC_INCLUDE_FILE
293 301
294int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void)
295{
296 int err = genl_register_family_with_ops(&ZZZ_genl_family,
297 ZZZ_genl_ops, ARRAY_SIZE(ZZZ_genl_ops));
298 if (err)
299 return err;
300#undef GENL_mc_group
301#define GENL_mc_group(group) \
302 err = genl_register_mc_group(&ZZZ_genl_family, \
303 &CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group)); \
304 if (err) \
305 goto fail; \
306 else \
307 pr_info("%s: mcg %s: %u\n", #group, \
308 __stringify(GENL_MAGIC_FAMILY), \
309 CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group).id);
310
311#include GENL_MAGIC_INCLUDE_FILE
312
313#undef GENL_mc_group 302#undef GENL_mc_group
314#define GENL_mc_group(group) 303#define GENL_mc_group(group)
315 return 0; 304
316fail: 305int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void)
317 genl_unregister_family(&ZZZ_genl_family); 306{
318 return err; 307 return genl_register_family_with_ops_groups(&ZZZ_genl_family, \
308 ZZZ_genl_ops, \
309 ZZZ_genl_mcgrps);
319} 310}
320 311
321void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void) 312void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void)
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 656a27efb2c8..3ea2cf6b0e6c 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -2,9 +2,12 @@
2#define __LINUX_GPIO_DRIVER_H 2#define __LINUX_GPIO_DRIVER_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/module.h>
5 6
6struct device; 7struct device;
7struct gpio_desc; 8struct gpio_desc;
9struct of_phandle_args;
10struct device_node;
8struct seq_file; 11struct seq_file;
9 12
10/** 13/**
@@ -125,6 +128,13 @@ extern struct gpio_chip *gpiochip_find(void *data,
125int gpiod_lock_as_irq(struct gpio_desc *desc); 128int gpiod_lock_as_irq(struct gpio_desc *desc);
126void gpiod_unlock_as_irq(struct gpio_desc *desc); 129void gpiod_unlock_as_irq(struct gpio_desc *desc);
127 130
131enum gpio_lookup_flags {
132 GPIO_ACTIVE_HIGH = (0 << 0),
133 GPIO_ACTIVE_LOW = (1 << 0),
134 GPIO_OPEN_DRAIN = (1 << 1),
135 GPIO_OPEN_SOURCE = (1 << 2),
136};
137
128/** 138/**
129 * Lookup table for associating GPIOs to specific devices and functions using 139 * Lookup table for associating GPIOs to specific devices and functions using
130 * platform data. 140 * platform data.
@@ -152,9 +162,9 @@ struct gpiod_lookup {
152 */ 162 */
153 unsigned int idx; 163 unsigned int idx;
154 /* 164 /*
155 * mask of GPIOF_* values 165 * mask of GPIO_* values
156 */ 166 */
157 unsigned long flags; 167 enum gpio_lookup_flags flags;
158}; 168};
159 169
160/* 170/*
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index a265af294ea4..b914ca3f57ba 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -21,6 +21,8 @@
21 21
22#include <linux/hid.h> 22#include <linux/hid.h>
23#include <linux/hid-sensor-ids.h> 23#include <linux/hid-sensor-ids.h>
24#include <linux/iio/iio.h>
25#include <linux/iio/trigger.h>
24 26
25/** 27/**
26 * struct hid_sensor_hub_attribute_info - Attribute info 28 * struct hid_sensor_hub_attribute_info - Attribute info
@@ -40,6 +42,8 @@ struct hid_sensor_hub_attribute_info {
40 s32 units; 42 s32 units;
41 s32 unit_expo; 43 s32 unit_expo;
42 s32 size; 44 s32 size;
45 s32 logical_minimum;
46 s32 logical_maximum;
43}; 47};
44 48
45/** 49/**
@@ -184,6 +188,7 @@ struct hid_sensor_common {
184 struct platform_device *pdev; 188 struct platform_device *pdev;
185 unsigned usage_id; 189 unsigned usage_id;
186 bool data_ready; 190 bool data_ready;
191 struct iio_trigger *trigger;
187 struct hid_sensor_hub_attribute_info poll; 192 struct hid_sensor_hub_attribute_info poll;
188 struct hid_sensor_hub_attribute_info report_state; 193 struct hid_sensor_hub_attribute_info report_state;
189 struct hid_sensor_hub_attribute_info power_state; 194 struct hid_sensor_hub_attribute_info power_state;
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index 4f945d3ed49f..8323775ac21d 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -117,4 +117,16 @@
117#define HID_USAGE_SENSOR_PROP_REPORT_STATE 0x200316 117#define HID_USAGE_SENSOR_PROP_REPORT_STATE 0x200316
118#define HID_USAGE_SENSOR_PROY_POWER_STATE 0x200319 118#define HID_USAGE_SENSOR_PROY_POWER_STATE 0x200319
119 119
120/* Power state enumerations */
121#define HID_USAGE_SENSOR_PROP_POWER_STATE_UNDEFINED_ENUM 0x00
122#define HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM 0x01
123#define HID_USAGE_SENSOR_PROP_POWER_STATE_D1_LOW_POWER_ENUM 0x02
124#define HID_USAGE_SENSOR_PROP_POWER_STATE_D2_STANDBY_WITH_WAKE_ENUM 0x03
125#define HID_USAGE_SENSOR_PROP_POWER_STATE_D3_SLEEP_WITH_WAKE_ENUM 0x04
126#define HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM 0x05
127
128/* Report State enumerations */
129#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM 0x00
130#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x01
131
120#endif 132#endif
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index acd2010328f3..bd7e98752222 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -31,6 +31,7 @@ struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
31void hugepage_put_subpool(struct hugepage_subpool *spool); 31void hugepage_put_subpool(struct hugepage_subpool *spool);
32 32
33int PageHuge(struct page *page); 33int PageHuge(struct page *page);
34int PageHeadHuge(struct page *page_head);
34 35
35void reset_vma_resv_huge_pages(struct vm_area_struct *vma); 36void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
36int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); 37int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
@@ -69,7 +70,6 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
69bool isolate_huge_page(struct page *page, struct list_head *list); 70bool isolate_huge_page(struct page *page, struct list_head *list);
70void putback_active_hugepage(struct page *page); 71void putback_active_hugepage(struct page *page);
71bool is_hugepage_active(struct page *page); 72bool is_hugepage_active(struct page *page);
72void copy_huge_page(struct page *dst, struct page *src);
73 73
74#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 74#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
75pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); 75pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
@@ -104,6 +104,11 @@ static inline int PageHuge(struct page *page)
104 return 0; 104 return 0;
105} 105}
106 106
107static inline int PageHeadHuge(struct page *page_head)
108{
109 return 0;
110}
111
107static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) 112static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
108{ 113{
109} 114}
@@ -137,12 +142,12 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page)
137 return 0; 142 return 0;
138} 143}
139 144
140#define isolate_huge_page(p, l) false 145static inline bool isolate_huge_page(struct page *page, struct list_head *list)
141#define putback_active_hugepage(p) do {} while (0)
142#define is_hugepage_active(x) false
143static inline void copy_huge_page(struct page *dst, struct page *src)
144{ 146{
147 return false;
145} 148}
149#define putback_active_hugepage(p) do {} while (0)
150#define is_hugepage_active(x) false
146 151
147static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma, 152static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
148 unsigned long address, unsigned long end, pgprot_t newprot) 153 unsigned long address, unsigned long end, pgprot_t newprot)
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index c2702856295e..84ba5ac39e03 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -119,4 +119,21 @@ extern int macvlan_link_register(struct rtnl_link_ops *ops);
119extern netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, 119extern netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
120 struct net_device *dev); 120 struct net_device *dev);
121 121
122#if IS_ENABLED(CONFIG_MACVLAN)
123static inline struct net_device *
124macvlan_dev_real_dev(const struct net_device *dev)
125{
126 struct macvlan_dev *macvlan = netdev_priv(dev);
127
128 return macvlan->lowerdev;
129}
130#else
131static inline struct net_device *
132macvlan_dev_real_dev(const struct net_device *dev)
133{
134 BUG();
135 return NULL;
136}
137#endif
138
122#endif /* _LINUX_IF_MACVLAN_H */ 139#endif /* _LINUX_IF_MACVLAN_H */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 5d89d1b808a6..c56c350324e4 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -4,6 +4,7 @@
4#include <uapi/linux/ipv6.h> 4#include <uapi/linux/ipv6.h>
5 5
6#define ipv6_optlen(p) (((p)->hdrlen+1) << 3) 6#define ipv6_optlen(p) (((p)->hdrlen+1) << 3)
7#define ipv6_authlen(p) (((p)->hdrlen+2) << 2)
7/* 8/*
8 * This structure contains configuration options per IPv6 link. 9 * This structure contains configuration options per IPv6 link.
9 */ 10 */
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 56bb0dc8b7d4..7dc10036eff5 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -70,6 +70,9 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data);
70 * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context 70 * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
71 * IRQ_NESTED_TRHEAD - Interrupt nests into another thread 71 * IRQ_NESTED_TRHEAD - Interrupt nests into another thread
72 * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable 72 * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
73 * IRQ_IS_POLLED - Always polled by another interrupt. Exclude
74 * it from the spurious interrupt detection
75 * mechanism and from core side polling.
73 */ 76 */
74enum { 77enum {
75 IRQ_TYPE_NONE = 0x00000000, 78 IRQ_TYPE_NONE = 0x00000000,
@@ -94,12 +97,14 @@ enum {
94 IRQ_NESTED_THREAD = (1 << 15), 97 IRQ_NESTED_THREAD = (1 << 15),
95 IRQ_NOTHREAD = (1 << 16), 98 IRQ_NOTHREAD = (1 << 16),
96 IRQ_PER_CPU_DEVID = (1 << 17), 99 IRQ_PER_CPU_DEVID = (1 << 17),
100 IRQ_IS_POLLED = (1 << 18),
97}; 101};
98 102
99#define IRQF_MODIFY_MASK \ 103#define IRQF_MODIFY_MASK \
100 (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ 104 (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
101 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ 105 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
102 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID) 106 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
107 IRQ_IS_POLLED)
103 108
104#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) 109#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
105 110
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
index 714ba08dc092..e374e369fb2f 100644
--- a/include/linux/irqreturn.h
+++ b/include/linux/irqreturn.h
@@ -14,6 +14,6 @@ enum irqreturn {
14}; 14};
15 15
16typedef enum irqreturn irqreturn_t; 16typedef enum irqreturn irqreturn_t;
17#define IRQ_RETVAL(x) ((x) != IRQ_NONE) 17#define IRQ_RETVAL(x) ((x) ? IRQ_HANDLED : IRQ_NONE)
18 18
19#endif 19#endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d4e98d13eff4..ecb87544cc5d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -193,7 +193,8 @@ extern int _cond_resched(void);
193 (__x < 0) ? -__x : __x; \ 193 (__x < 0) ? -__x : __x; \
194 }) 194 })
195 195
196#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP) 196#if defined(CONFIG_MMU) && \
197 (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
197void might_fault(void); 198void might_fault(void);
198#else 199#else
199static inline void might_fault(void) { } 200static inline void might_fault(void) { }
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index d78d28a733b1..5fd33dc1fe3a 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -198,6 +198,9 @@ extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
198extern size_t vmcoreinfo_size; 198extern size_t vmcoreinfo_size;
199extern size_t vmcoreinfo_max_size; 199extern size_t vmcoreinfo_max_size;
200 200
201/* flag to track if kexec reboot is in progress */
202extern bool kexec_in_progress;
203
201int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, 204int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
202 unsigned long long *crash_size, unsigned long long *crash_base); 205 unsigned long long *crash_size, unsigned long long *crash_base);
203int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, 206int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 518a53afb9ea..a74c3a84dfdd 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -45,6 +45,7 @@ struct key_preparsed_payload {
45 const void *data; /* Raw data */ 45 const void *data; /* Raw data */
46 size_t datalen; /* Raw datalen */ 46 size_t datalen; /* Raw datalen */
47 size_t quotalen; /* Quota length for proposed payload */ 47 size_t quotalen; /* Quota length for proposed payload */
48 bool trusted; /* True if key is trusted */
48}; 49};
49 50
50typedef int (*request_key_actor_t)(struct key_construction *key, 51typedef int (*request_key_actor_t)(struct key_construction *key,
@@ -63,6 +64,11 @@ struct key_type {
63 */ 64 */
64 size_t def_datalen; 65 size_t def_datalen;
65 66
67 /* Default key search algorithm. */
68 unsigned def_lookup_type;
69#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */
70#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. */
71
66 /* vet a description */ 72 /* vet a description */
67 int (*vet_description)(const char *description); 73 int (*vet_description)(const char *description);
68 74
diff --git a/include/linux/key.h b/include/linux/key.h
index 4dfde1161c5e..80d677483e31 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -22,6 +22,7 @@
22#include <linux/sysctl.h> 22#include <linux/sysctl.h>
23#include <linux/rwsem.h> 23#include <linux/rwsem.h>
24#include <linux/atomic.h> 24#include <linux/atomic.h>
25#include <linux/assoc_array.h>
25 26
26#ifdef __KERNEL__ 27#ifdef __KERNEL__
27#include <linux/uidgid.h> 28#include <linux/uidgid.h>
@@ -82,6 +83,12 @@ struct key_owner;
82struct keyring_list; 83struct keyring_list;
83struct keyring_name; 84struct keyring_name;
84 85
86struct keyring_index_key {
87 struct key_type *type;
88 const char *description;
89 size_t desc_len;
90};
91
85/*****************************************************************************/ 92/*****************************************************************************/
86/* 93/*
87 * key reference with possession attribute handling 94 * key reference with possession attribute handling
@@ -99,7 +106,7 @@ struct keyring_name;
99typedef struct __key_reference_with_attributes *key_ref_t; 106typedef struct __key_reference_with_attributes *key_ref_t;
100 107
101static inline key_ref_t make_key_ref(const struct key *key, 108static inline key_ref_t make_key_ref(const struct key *key,
102 unsigned long possession) 109 bool possession)
103{ 110{
104 return (key_ref_t) ((unsigned long) key | possession); 111 return (key_ref_t) ((unsigned long) key | possession);
105} 112}
@@ -109,7 +116,7 @@ static inline struct key *key_ref_to_ptr(const key_ref_t key_ref)
109 return (struct key *) ((unsigned long) key_ref & ~1UL); 116 return (struct key *) ((unsigned long) key_ref & ~1UL);
110} 117}
111 118
112static inline unsigned long is_key_possessed(const key_ref_t key_ref) 119static inline bool is_key_possessed(const key_ref_t key_ref)
113{ 120{
114 return (unsigned long) key_ref & 1UL; 121 return (unsigned long) key_ref & 1UL;
115} 122}
@@ -129,7 +136,6 @@ struct key {
129 struct list_head graveyard_link; 136 struct list_head graveyard_link;
130 struct rb_node serial_node; 137 struct rb_node serial_node;
131 }; 138 };
132 struct key_type *type; /* type of key */
133 struct rw_semaphore sem; /* change vs change sem */ 139 struct rw_semaphore sem; /* change vs change sem */
134 struct key_user *user; /* owner of this key */ 140 struct key_user *user; /* owner of this key */
135 void *security; /* security data for this key */ 141 void *security; /* security data for this key */
@@ -162,13 +168,21 @@ struct key {
162#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */ 168#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */
163#define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */ 169#define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */
164#define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */ 170#define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */
171#define KEY_FLAG_TRUSTED 8 /* set if key is trusted */
172#define KEY_FLAG_TRUSTED_ONLY 9 /* set if keyring only accepts links to trusted keys */
165 173
166 /* the description string 174 /* the key type and key description string
167 * - this is used to match a key against search criteria 175 * - the desc is used to match a key against search criteria
168 * - this should be a printable string 176 * - it should be a printable string
169 * - eg: for krb5 AFS, this might be "afs@REDHAT.COM" 177 * - eg: for krb5 AFS, this might be "afs@REDHAT.COM"
170 */ 178 */
171 char *description; 179 union {
180 struct keyring_index_key index_key;
181 struct {
182 struct key_type *type; /* type of key */
183 char *description;
184 };
185 };
172 186
173 /* type specific data 187 /* type specific data
174 * - this is used by the keyring type to index the name 188 * - this is used by the keyring type to index the name
@@ -185,11 +199,14 @@ struct key {
185 * whatever 199 * whatever
186 */ 200 */
187 union { 201 union {
188 unsigned long value; 202 union {
189 void __rcu *rcudata; 203 unsigned long value;
190 void *data; 204 void __rcu *rcudata;
191 struct keyring_list __rcu *subscriptions; 205 void *data;
192 } payload; 206 void *data2[2];
207 } payload;
208 struct assoc_array keys;
209 };
193}; 210};
194 211
195extern struct key *key_alloc(struct key_type *type, 212extern struct key *key_alloc(struct key_type *type,
@@ -203,18 +220,23 @@ extern struct key *key_alloc(struct key_type *type,
203#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */ 220#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */
204#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */ 221#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */
205#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */ 222#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
223#define KEY_ALLOC_TRUSTED 0x0004 /* Key should be flagged as trusted */
206 224
207extern void key_revoke(struct key *key); 225extern void key_revoke(struct key *key);
208extern void key_invalidate(struct key *key); 226extern void key_invalidate(struct key *key);
209extern void key_put(struct key *key); 227extern void key_put(struct key *key);
210 228
211static inline struct key *key_get(struct key *key) 229static inline struct key *__key_get(struct key *key)
212{ 230{
213 if (key) 231 atomic_inc(&key->usage);
214 atomic_inc(&key->usage);
215 return key; 232 return key;
216} 233}
217 234
235static inline struct key *key_get(struct key *key)
236{
237 return key ? __key_get(key) : key;
238}
239
218static inline void key_ref_put(key_ref_t key_ref) 240static inline void key_ref_put(key_ref_t key_ref)
219{ 241{
220 key_put(key_ref_to_ptr(key_ref)); 242 key_put(key_ref_to_ptr(key_ref));
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 2d0c9071bcfb..cab2dd279076 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -39,7 +39,8 @@ enum sec_device_type {
39struct sec_pmic_dev { 39struct sec_pmic_dev {
40 struct device *dev; 40 struct device *dev;
41 struct sec_platform_data *pdata; 41 struct sec_platform_data *pdata;
42 struct regmap *regmap; 42 struct regmap *regmap_pmic;
43 struct regmap *regmap_rtc;
43 struct i2c_client *i2c; 44 struct i2c_client *i2c;
44 struct i2c_client *rtc; 45 struct i2c_client *rtc;
45 46
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index ad05ce60c1c9..2e5b194b9b19 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -22,6 +22,8 @@
22#define PHY_ID_KSZ8021 0x00221555 22#define PHY_ID_KSZ8021 0x00221555
23#define PHY_ID_KSZ8031 0x00221556 23#define PHY_ID_KSZ8031 0x00221556
24#define PHY_ID_KSZ8041 0x00221510 24#define PHY_ID_KSZ8041 0x00221510
25/* undocumented */
26#define PHY_ID_KSZ8041RNLI 0x00221537
25#define PHY_ID_KSZ8051 0x00221550 27#define PHY_ID_KSZ8051 0x00221550
26/* same id: ks8001 Rev. A/B, and ks8721 Rev 3. */ 28/* same id: ks8001 Rev. A/B, and ks8721 Rev 3. */
27#define PHY_ID_KSZ8001 0x0022161A 29#define PHY_ID_KSZ8001 0x0022161A
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0548eb201e05..1cedd000cf29 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1318,7 +1318,6 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
1318 1318
1319#if USE_SPLIT_PTE_PTLOCKS 1319#if USE_SPLIT_PTE_PTLOCKS
1320#if BLOATED_SPINLOCKS 1320#if BLOATED_SPINLOCKS
1321void __init ptlock_cache_init(void);
1322extern bool ptlock_alloc(struct page *page); 1321extern bool ptlock_alloc(struct page *page);
1323extern void ptlock_free(struct page *page); 1322extern void ptlock_free(struct page *page);
1324 1323
@@ -1327,7 +1326,6 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
1327 return page->ptl; 1326 return page->ptl;
1328} 1327}
1329#else /* BLOATED_SPINLOCKS */ 1328#else /* BLOATED_SPINLOCKS */
1330static inline void ptlock_cache_init(void) {}
1331static inline bool ptlock_alloc(struct page *page) 1329static inline bool ptlock_alloc(struct page *page)
1332{ 1330{
1333 return true; 1331 return true;
@@ -1380,17 +1378,10 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1380{ 1378{
1381 return &mm->page_table_lock; 1379 return &mm->page_table_lock;
1382} 1380}
1383static inline void ptlock_cache_init(void) {}
1384static inline bool ptlock_init(struct page *page) { return true; } 1381static inline bool ptlock_init(struct page *page) { return true; }
1385static inline void pte_lock_deinit(struct page *page) {} 1382static inline void pte_lock_deinit(struct page *page) {}
1386#endif /* USE_SPLIT_PTE_PTLOCKS */ 1383#endif /* USE_SPLIT_PTE_PTLOCKS */
1387 1384
1388static inline void pgtable_init(void)
1389{
1390 ptlock_cache_init();
1391 pgtable_cache_init();
1392}
1393
1394static inline bool pgtable_page_ctor(struct page *page) 1385static inline bool pgtable_page_ctor(struct page *page)
1395{ 1386{
1396 inc_zone_page_state(page, NR_PAGETABLE); 1387 inc_zone_page_state(page, NR_PAGETABLE);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 10f5a7272b80..bd299418a934 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -44,18 +44,22 @@ struct page {
44 /* First double word block */ 44 /* First double word block */
45 unsigned long flags; /* Atomic flags, some possibly 45 unsigned long flags; /* Atomic flags, some possibly
46 * updated asynchronously */ 46 * updated asynchronously */
47 struct address_space *mapping; /* If low bit clear, points to 47 union {
48 * inode address_space, or NULL. 48 struct address_space *mapping; /* If low bit clear, points to
49 * If page mapped as anonymous 49 * inode address_space, or NULL.
50 * memory, low bit is set, and 50 * If page mapped as anonymous
51 * it points to anon_vma object: 51 * memory, low bit is set, and
52 * see PAGE_MAPPING_ANON below. 52 * it points to anon_vma object:
53 */ 53 * see PAGE_MAPPING_ANON below.
54 */
55 void *s_mem; /* slab first object */
56 };
57
54 /* Second double word */ 58 /* Second double word */
55 struct { 59 struct {
56 union { 60 union {
57 pgoff_t index; /* Our offset within mapping. */ 61 pgoff_t index; /* Our offset within mapping. */
58 void *freelist; /* slub/slob first free object */ 62 void *freelist; /* sl[aou]b first free object */
59 bool pfmemalloc; /* If set by the page allocator, 63 bool pfmemalloc; /* If set by the page allocator,
60 * ALLOC_NO_WATERMARKS was set 64 * ALLOC_NO_WATERMARKS was set
61 * and the low watermark was not 65 * and the low watermark was not
@@ -65,9 +69,6 @@ struct page {
65 * this page is only used to 69 * this page is only used to
66 * free other pages. 70 * free other pages.
67 */ 71 */
68#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
69 pgtable_t pmd_huge_pte; /* protected by page->ptl */
70#endif
71 }; 72 };
72 73
73 union { 74 union {
@@ -114,6 +115,7 @@ struct page {
114 }; 115 };
115 atomic_t _count; /* Usage count, see below. */ 116 atomic_t _count; /* Usage count, see below. */
116 }; 117 };
118 unsigned int active; /* SLAB */
117 }; 119 };
118 }; 120 };
119 121
@@ -135,6 +137,12 @@ struct page {
135 137
136 struct list_head list; /* slobs list of pages */ 138 struct list_head list; /* slobs list of pages */
137 struct slab *slab_page; /* slab fields */ 139 struct slab *slab_page; /* slab fields */
140 struct rcu_head rcu_head; /* Used by SLAB
141 * when destroying via RCU
142 */
143#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
144 pgtable_t pmd_huge_pte; /* protected by page->ptl */
145#endif
138 }; 146 };
139 147
140 /* Remainder is not double word aligned */ 148 /* Remainder is not double word aligned */
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 87cce50bd121..009b02481436 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -26,11 +26,11 @@ struct msi_desc {
26 struct { 26 struct {
27 __u8 is_msix : 1; 27 __u8 is_msix : 1;
28 __u8 multiple: 3; /* log2 number of messages */ 28 __u8 multiple: 3; /* log2 number of messages */
29 __u8 maskbit : 1; /* mask-pending bit supported ? */ 29 __u8 maskbit : 1; /* mask-pending bit supported ? */
30 __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ 30 __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
31 __u8 pos; /* Location of the msi capability */ 31 __u8 pos; /* Location of the msi capability */
32 __u16 entry_nr; /* specific enabled entry */ 32 __u16 entry_nr; /* specific enabled entry */
33 unsigned default_irq; /* default pre-assigned irq */ 33 unsigned default_irq; /* default pre-assigned irq */
34 } msi_attrib; 34 } msi_attrib;
35 35
36 u32 masked; /* mask bits */ 36 u32 masked; /* mask bits */
diff --git a/include/linux/net.h b/include/linux/net.h
index b292a0435571..69be3e6079c8 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -164,6 +164,14 @@ struct proto_ops {
164#endif 164#endif
165 int (*sendmsg) (struct kiocb *iocb, struct socket *sock, 165 int (*sendmsg) (struct kiocb *iocb, struct socket *sock,
166 struct msghdr *m, size_t total_len); 166 struct msghdr *m, size_t total_len);
167 /* Notes for implementing recvmsg:
168 * ===============================
169 * msg->msg_namelen should get updated by the recvmsg handlers
170 * iff msg_name != NULL. It is by default 0 to prevent
171 * returning uninitialized memory to user space. The recvfrom
172 * handlers can assume that msg.msg_name is either NULL or has
173 * a minimum size of sizeof(struct sockaddr_storage).
174 */
167 int (*recvmsg) (struct kiocb *iocb, struct socket *sock, 175 int (*recvmsg) (struct kiocb *iocb, struct socket *sock,
168 struct msghdr *m, size_t total_len, 176 struct msghdr *m, size_t total_len,
169 int flags); 177 int flags);
@@ -173,7 +181,7 @@ struct proto_ops {
173 int offset, size_t size, int flags); 181 int offset, size_t size, int flags);
174 ssize_t (*splice_read)(struct socket *sock, loff_t *ppos, 182 ssize_t (*splice_read)(struct socket *sock, loff_t *ppos,
175 struct pipe_inode_info *pipe, size_t len, unsigned int flags); 183 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
176 void (*set_peek_off)(struct sock *sk, int val); 184 int (*set_peek_off)(struct sock *sk, int val);
177}; 185};
178 186
179#define DECLARE_SOCKADDR(type, dst, src) \ 187#define DECLARE_SOCKADDR(type, dst, src) \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7f0ed423a360..d9a550bf3e8e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1255,7 +1255,7 @@ struct net_device {
1255 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ 1255 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
1256 unsigned char addr_assign_type; /* hw address assignment type */ 1256 unsigned char addr_assign_type; /* hw address assignment type */
1257 unsigned char addr_len; /* hardware address length */ 1257 unsigned char addr_len; /* hardware address length */
1258 unsigned char neigh_priv_len; 1258 unsigned short neigh_priv_len;
1259 unsigned short dev_id; /* Used to differentiate devices 1259 unsigned short dev_id; /* Used to differentiate devices
1260 * that share the same link 1260 * that share the same link
1261 * layer address 1261 * layer address
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index c1637062c1ce..12c2cb947df5 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -413,16 +413,6 @@ enum lock_type4 {
413#define NFS4_VERSION 4 413#define NFS4_VERSION 4
414#define NFS4_MINOR_VERSION 0 414#define NFS4_MINOR_VERSION 0
415 415
416#if defined(CONFIG_NFS_V4_2)
417#define NFS4_MAX_MINOR_VERSION 2
418#else
419#if defined(CONFIG_NFS_V4_1)
420#define NFS4_MAX_MINOR_VERSION 1
421#else
422#define NFS4_MAX_MINOR_VERSION 0
423#endif /* CONFIG_NFS_V4_1 */
424#endif /* CONFIG_NFS_V4_2 */
425
426#define NFS4_DEBUG 1 416#define NFS4_DEBUG 1
427 417
428/* Index of predefined Linux client operations */ 418/* Index of predefined Linux client operations */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 14a48207a304..48997374eaf0 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -507,24 +507,6 @@ extern int nfs_mountpoint_expiry_timeout;
507extern void nfs_release_automount_timer(void); 507extern void nfs_release_automount_timer(void);
508 508
509/* 509/*
510 * linux/fs/nfs/nfs4proc.c
511 */
512#ifdef CONFIG_NFS_V4_SECURITY_LABEL
513extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags);
514static inline void nfs4_label_free(struct nfs4_label *label)
515{
516 if (label) {
517 kfree(label->label);
518 kfree(label);
519 }
520 return;
521}
522#else
523static inline struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags) { return NULL; }
524static inline void nfs4_label_free(void *label) {}
525#endif
526
527/*
528 * linux/fs/nfs/unlink.c 510 * linux/fs/nfs/unlink.c
529 */ 511 */
530extern void nfs_complete_unlink(struct dentry *dentry, struct inode *); 512extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 86292beebfe2..438694650471 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -129,10 +129,9 @@ struct parallel_data {
129 struct padata_serial_queue __percpu *squeue; 129 struct padata_serial_queue __percpu *squeue;
130 atomic_t reorder_objects; 130 atomic_t reorder_objects;
131 atomic_t refcnt; 131 atomic_t refcnt;
132 atomic_t seq_nr;
132 struct padata_cpumask cpumask; 133 struct padata_cpumask cpumask;
133 spinlock_t lock ____cacheline_aligned; 134 spinlock_t lock ____cacheline_aligned;
134 spinlock_t seq_lock;
135 unsigned int seq_nr;
136 unsigned int processed; 135 unsigned int processed;
137 struct timer_list timer; 136 struct timer_list timer;
138}; 137};
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index d006f0ca60f4..5a462c4e5009 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -27,7 +27,7 @@ static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
27 while (!pci_is_root_bus(pbus)) 27 while (!pci_is_root_bus(pbus))
28 pbus = pbus->parent; 28 pbus = pbus->parent;
29 29
30 return DEVICE_ACPI_HANDLE(pbus->bridge); 30 return ACPI_HANDLE(pbus->bridge);
31} 31}
32 32
33static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) 33static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
@@ -39,7 +39,7 @@ static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
39 else 39 else
40 dev = &pbus->self->dev; 40 dev = &pbus->self->dev;
41 41
42 return DEVICE_ACPI_HANDLE(dev); 42 return ACPI_HANDLE(dev);
43} 43}
44 44
45void acpi_pci_add_bus(struct pci_bus *bus); 45void acpi_pci_add_bus(struct pci_bus *bus);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 835ec7bf6c05..a13d6825e586 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -32,7 +32,6 @@
32#include <linux/irqreturn.h> 32#include <linux/irqreturn.h>
33#include <uapi/linux/pci.h> 33#include <uapi/linux/pci.h>
34 34
35/* Include the ID list */
36#include <linux/pci_ids.h> 35#include <linux/pci_ids.h>
37 36
38/* 37/*
@@ -42,9 +41,10 @@
42 * 41 *
43 * 7:3 = slot 42 * 7:3 = slot
44 * 2:0 = function 43 * 2:0 = function
45 * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined uapi/linux/pci.h 44 *
45 * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
46 * In the interest of not exposing interfaces to user-space unnecessarily, 46 * In the interest of not exposing interfaces to user-space unnecessarily,
47 * the following kernel only defines are being added here. 47 * the following kernel-only defines are being added here.
48 */ 48 */
49#define PCI_DEVID(bus, devfn) ((((u16)bus) << 8) | devfn) 49#define PCI_DEVID(bus, devfn) ((((u16)bus) << 8) | devfn)
50/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */ 50/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
@@ -153,10 +153,10 @@ enum pcie_reset_state {
153 /* Reset is NOT asserted (Use to deassert reset) */ 153 /* Reset is NOT asserted (Use to deassert reset) */
154 pcie_deassert_reset = (__force pcie_reset_state_t) 1, 154 pcie_deassert_reset = (__force pcie_reset_state_t) 1,
155 155
156 /* Use #PERST to reset PCI-E device */ 156 /* Use #PERST to reset PCIe device */
157 pcie_warm_reset = (__force pcie_reset_state_t) 2, 157 pcie_warm_reset = (__force pcie_reset_state_t) 2,
158 158
159 /* Use PCI-E Hot Reset to reset device */ 159 /* Use PCIe Hot Reset to reset device */
160 pcie_hot_reset = (__force pcie_reset_state_t) 3 160 pcie_hot_reset = (__force pcie_reset_state_t) 3
161}; 161};
162 162
@@ -259,13 +259,13 @@ struct pci_dev {
259 unsigned int class; /* 3 bytes: (base,sub,prog-if) */ 259 unsigned int class; /* 3 bytes: (base,sub,prog-if) */
260 u8 revision; /* PCI revision, low byte of class word */ 260 u8 revision; /* PCI revision, low byte of class word */
261 u8 hdr_type; /* PCI header type (`multi' flag masked out) */ 261 u8 hdr_type; /* PCI header type (`multi' flag masked out) */
262 u8 pcie_cap; /* PCI-E capability offset */ 262 u8 pcie_cap; /* PCIe capability offset */
263 u8 msi_cap; /* MSI capability offset */ 263 u8 msi_cap; /* MSI capability offset */
264 u8 msix_cap; /* MSI-X capability offset */ 264 u8 msix_cap; /* MSI-X capability offset */
265 u8 pcie_mpss:3; /* PCI-E Max Payload Size Supported */ 265 u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */
266 u8 rom_base_reg; /* which config register controls the ROM */ 266 u8 rom_base_reg; /* which config register controls the ROM */
267 u8 pin; /* which interrupt pin this device uses */ 267 u8 pin; /* which interrupt pin this device uses */
268 u16 pcie_flags_reg; /* cached PCI-E Capabilities Register */ 268 u16 pcie_flags_reg; /* cached PCIe Capabilities Register */
269 269
270 struct pci_driver *driver; /* which driver has allocated this device */ 270 struct pci_driver *driver; /* which driver has allocated this device */
271 u64 dma_mask; /* Mask of the bits of bus address this 271 u64 dma_mask; /* Mask of the bits of bus address this
@@ -300,7 +300,7 @@ struct pci_dev {
300 unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */ 300 unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
301 301
302#ifdef CONFIG_PCIEASPM 302#ifdef CONFIG_PCIEASPM
303 struct pcie_link_state *link_state; /* ASPM link state. */ 303 struct pcie_link_state *link_state; /* ASPM link state */
304#endif 304#endif
305 305
306 pci_channel_state_t error_state; /* current connectivity state */ 306 pci_channel_state_t error_state; /* current connectivity state */
@@ -317,7 +317,7 @@ struct pci_dev {
317 317
318 bool match_driver; /* Skip attaching driver */ 318 bool match_driver; /* Skip attaching driver */
319 /* These fields are used by common fixups */ 319 /* These fields are used by common fixups */
320 unsigned int transparent:1; /* Transparent PCI bridge */ 320 unsigned int transparent:1; /* Subtractive decode PCI bridge */
321 unsigned int multifunction:1;/* Part of multi-function device */ 321 unsigned int multifunction:1;/* Part of multi-function device */
322 /* keep track of device state */ 322 /* keep track of device state */
323 unsigned int is_added:1; 323 unsigned int is_added:1;
@@ -326,7 +326,7 @@ struct pci_dev {
326 unsigned int block_cfg_access:1; /* config space access is blocked */ 326 unsigned int block_cfg_access:1; /* config space access is blocked */
327 unsigned int broken_parity_status:1; /* Device generates false positive parity */ 327 unsigned int broken_parity_status:1; /* Device generates false positive parity */
328 unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ 328 unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
329 unsigned int msi_enabled:1; 329 unsigned int msi_enabled:1;
330 unsigned int msix_enabled:1; 330 unsigned int msix_enabled:1;
331 unsigned int ari_enabled:1; /* ARI forwarding */ 331 unsigned int ari_enabled:1; /* ARI forwarding */
332 unsigned int is_managed:1; 332 unsigned int is_managed:1;
@@ -371,7 +371,6 @@ static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
371 if (dev->is_virtfn) 371 if (dev->is_virtfn)
372 dev = dev->physfn; 372 dev = dev->physfn;
373#endif 373#endif
374
375 return dev; 374 return dev;
376} 375}
377 376
@@ -456,7 +455,7 @@ struct pci_bus {
456 char name[48]; 455 char name[48];
457 456
458 unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */ 457 unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */
459 pci_bus_flags_t bus_flags; /* Inherited by child busses */ 458 pci_bus_flags_t bus_flags; /* inherited by child buses */
460 struct device *bridge; 459 struct device *bridge;
461 struct device dev; 460 struct device dev;
462 struct bin_attribute *legacy_io; /* legacy I/O for this bus */ 461 struct bin_attribute *legacy_io; /* legacy I/O for this bus */
@@ -468,7 +467,7 @@ struct pci_bus {
468#define to_pci_bus(n) container_of(n, struct pci_bus, dev) 467#define to_pci_bus(n) container_of(n, struct pci_bus, dev)
469 468
470/* 469/*
471 * Returns true if the pci bus is root (behind host-pci bridge), 470 * Returns true if the PCI bus is root (behind host-PCI bridge),
472 * false otherwise 471 * false otherwise
473 * 472 *
474 * Some code assumes that "bus->self == NULL" means that bus is a root bus. 473 * Some code assumes that "bus->self == NULL" means that bus is a root bus.
@@ -510,7 +509,7 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false;
510#define PCIBIOS_BUFFER_TOO_SMALL 0x89 509#define PCIBIOS_BUFFER_TOO_SMALL 0x89
511 510
512/* 511/*
513 * Translate above to generic errno for passing back through non-pci. 512 * Translate above to generic errno for passing back through non-PCI code.
514 */ 513 */
515static inline int pcibios_err_to_errno(int err) 514static inline int pcibios_err_to_errno(int err)
516{ 515{
@@ -561,11 +560,12 @@ struct pci_dynids {
561 struct list_head list; /* for IDs added at runtime */ 560 struct list_head list; /* for IDs added at runtime */
562}; 561};
563 562
564/* ---------------------------------------------------------------- */ 563
565/** PCI Error Recovery System (PCI-ERS). If a PCI device driver provides 564/*
566 * a set of callbacks in struct pci_error_handlers, then that device driver 565 * PCI Error Recovery System (PCI-ERS). If a PCI device driver provides
567 * will be notified of PCI bus errors, and will be driven to recovery 566 * a set of callbacks in struct pci_error_handlers, that device driver
568 * when an error occurs. 567 * will be notified of PCI bus errors, and will be driven to recovery
568 * when an error occurs.
569 */ 569 */
570 570
571typedef unsigned int __bitwise pci_ers_result_t; 571typedef unsigned int __bitwise pci_ers_result_t;
@@ -609,7 +609,6 @@ struct pci_error_handlers {
609 void (*resume)(struct pci_dev *dev); 609 void (*resume)(struct pci_dev *dev);
610}; 610};
611 611
612/* ---------------------------------------------------------------- */
613 612
614struct module; 613struct module;
615struct pci_driver { 614struct pci_driver {
@@ -713,10 +712,10 @@ extern enum pcie_bus_config_types pcie_bus_config;
713 712
714extern struct bus_type pci_bus_type; 713extern struct bus_type pci_bus_type;
715 714
716/* Do NOT directly access these two variables, unless you are arch specific pci 715/* Do NOT directly access these two variables, unless you are arch-specific PCI
717 * code, or pci core code. */ 716 * code, or PCI core code. */
718extern struct list_head pci_root_buses; /* list of all known PCI buses */ 717extern struct list_head pci_root_buses; /* list of all known PCI buses */
719/* Some device drivers need know if pci is initiated */ 718/* Some device drivers need know if PCI is initiated */
720int no_pci_devices(void); 719int no_pci_devices(void);
721 720
722void pcibios_resource_survey_bus(struct pci_bus *bus); 721void pcibios_resource_survey_bus(struct pci_bus *bus);
@@ -724,7 +723,7 @@ void pcibios_add_bus(struct pci_bus *bus);
724void pcibios_remove_bus(struct pci_bus *bus); 723void pcibios_remove_bus(struct pci_bus *bus);
725void pcibios_fixup_bus(struct pci_bus *); 724void pcibios_fixup_bus(struct pci_bus *);
726int __must_check pcibios_enable_device(struct pci_dev *, int mask); 725int __must_check pcibios_enable_device(struct pci_dev *, int mask);
727/* Architecture specific versions may override this (weak) */ 726/* Architecture-specific versions may override this (weak) */
728char *pcibios_setup(char *str); 727char *pcibios_setup(char *str);
729 728
730/* Used only when drivers/pci/setup.c is used */ 729/* Used only when drivers/pci/setup.c is used */
@@ -961,6 +960,7 @@ void pci_update_resource(struct pci_dev *dev, int resno);
961int __must_check pci_assign_resource(struct pci_dev *dev, int i); 960int __must_check pci_assign_resource(struct pci_dev *dev, int i);
962int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align); 961int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
963int pci_select_bars(struct pci_dev *dev, unsigned long flags); 962int pci_select_bars(struct pci_dev *dev, unsigned long flags);
963bool pci_device_is_present(struct pci_dev *pdev);
964 964
965/* ROM control related routines */ 965/* ROM control related routines */
966int pci_enable_rom(struct pci_dev *pdev); 966int pci_enable_rom(struct pci_dev *pdev);
@@ -1258,7 +1258,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
1258 1258
1259/* 1259/*
1260 * PCI domain support. Sometimes called PCI segment (eg by ACPI), 1260 * PCI domain support. Sometimes called PCI segment (eg by ACPI),
1261 * a PCI domain is defined to be a set of PCI busses which share 1261 * a PCI domain is defined to be a set of PCI buses which share
1262 * configuration space. 1262 * configuration space.
1263 */ 1263 */
1264#ifdef CONFIG_PCI_DOMAINS 1264#ifdef CONFIG_PCI_DOMAINS
@@ -1568,65 +1568,65 @@ enum pci_fixup_pass {
1568/* Anonymous variables would be nice... */ 1568/* Anonymous variables would be nice... */
1569#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \ 1569#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
1570 class_shift, hook) \ 1570 class_shift, hook) \
1571 static const struct pci_fixup __pci_fixup_##name __used \ 1571 static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
1572 __attribute__((__section__(#section), aligned((sizeof(void *))))) \ 1572 __attribute__((__section__(#section), aligned((sizeof(void *))))) \
1573 = { vendor, device, class, class_shift, hook }; 1573 = { vendor, device, class, class_shift, hook };
1574 1574
1575#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \ 1575#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
1576 class_shift, hook) \ 1576 class_shift, hook) \
1577 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ 1577 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
1578 vendor##device##hook, vendor, device, class, class_shift, hook) 1578 hook, vendor, device, class, class_shift, hook)
1579#define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \ 1579#define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \
1580 class_shift, hook) \ 1580 class_shift, hook) \
1581 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ 1581 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
1582 vendor##device##hook, vendor, device, class, class_shift, hook) 1582 hook, vendor, device, class, class_shift, hook)
1583#define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \ 1583#define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \
1584 class_shift, hook) \ 1584 class_shift, hook) \
1585 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ 1585 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
1586 vendor##device##hook, vendor, device, class, class_shift, hook) 1586 hook, vendor, device, class, class_shift, hook)
1587#define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \ 1587#define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \
1588 class_shift, hook) \ 1588 class_shift, hook) \
1589 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ 1589 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
1590 vendor##device##hook, vendor, device, class, class_shift, hook) 1590 hook, vendor, device, class, class_shift, hook)
1591#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ 1591#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
1592 class_shift, hook) \ 1592 class_shift, hook) \
1593 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 1593 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1594 resume##vendor##device##hook, vendor, device, class, \ 1594 resume##hook, vendor, device, class, \
1595 class_shift, hook) 1595 class_shift, hook)
1596#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ 1596#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
1597 class_shift, hook) \ 1597 class_shift, hook) \
1598 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 1598 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1599 resume_early##vendor##device##hook, vendor, device, \ 1599 resume_early##hook, vendor, device, \
1600 class, class_shift, hook) 1600 class, class_shift, hook)
1601#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ 1601#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
1602 class_shift, hook) \ 1602 class_shift, hook) \
1603 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 1603 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1604 suspend##vendor##device##hook, vendor, device, class, \ 1604 suspend##hook, vendor, device, class, \
1605 class_shift, hook) 1605 class_shift, hook)
1606 1606
1607#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ 1607#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
1608 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ 1608 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
1609 vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) 1609 hook, vendor, device, PCI_ANY_ID, 0, hook)
1610#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ 1610#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
1611 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ 1611 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
1612 vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) 1612 hook, vendor, device, PCI_ANY_ID, 0, hook)
1613#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ 1613#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
1614 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ 1614 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
1615 vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) 1615 hook, vendor, device, PCI_ANY_ID, 0, hook)
1616#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ 1616#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
1617 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ 1617 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
1618 vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) 1618 hook, vendor, device, PCI_ANY_ID, 0, hook)
1619#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ 1619#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
1620 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 1620 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1621 resume##vendor##device##hook, vendor, device, \ 1621 resume##hook, vendor, device, \
1622 PCI_ANY_ID, 0, hook) 1622 PCI_ANY_ID, 0, hook)
1623#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ 1623#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
1624 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 1624 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1625 resume_early##vendor##device##hook, vendor, device, \ 1625 resume_early##hook, vendor, device, \
1626 PCI_ANY_ID, 0, hook) 1626 PCI_ANY_ID, 0, hook)
1627#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ 1627#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
1628 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 1628 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1629 suspend##vendor##device##hook, vendor, device, \ 1629 suspend##hook, vendor, device, \
1630 PCI_ANY_ID, 0, hook) 1630 PCI_ANY_ID, 0, hook)
1631 1631
1632#ifdef CONFIG_PCI_QUIRKS 1632#ifdef CONFIG_PCI_QUIRKS
@@ -1672,7 +1672,7 @@ extern u8 pci_cache_line_size;
1672extern unsigned long pci_hotplug_io_size; 1672extern unsigned long pci_hotplug_io_size;
1673extern unsigned long pci_hotplug_mem_size; 1673extern unsigned long pci_hotplug_mem_size;
1674 1674
1675/* Architecture specific versions may override these (weak) */ 1675/* Architecture-specific versions may override these (weak) */
1676int pcibios_add_platform_entries(struct pci_dev *dev); 1676int pcibios_add_platform_entries(struct pci_dev *dev);
1677void pcibios_disable_device(struct pci_dev *dev); 1677void pcibios_disable_device(struct pci_dev *dev);
1678void pcibios_set_master(struct pci_dev *dev); 1678void pcibios_set_master(struct pci_dev *dev);
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 430dd963707b..a2e2f1d17e16 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -39,8 +39,8 @@
39 * @hardware_test: Called to run a specified hardware test on the specified 39 * @hardware_test: Called to run a specified hardware test on the specified
40 * slot. 40 * slot.
41 * @get_power_status: Called to get the current power status of a slot. 41 * @get_power_status: Called to get the current power status of a slot.
42 * If this field is NULL, the value passed in the struct hotplug_slot_info 42 * If this field is NULL, the value passed in the struct hotplug_slot_info
43 * will be used when this value is requested by a user. 43 * will be used when this value is requested by a user.
44 * @get_attention_status: Called to get the current attention status of a slot. 44 * @get_attention_status: Called to get the current attention status of a slot.
45 * If this field is NULL, the value passed in the struct hotplug_slot_info 45 * If this field is NULL, the value passed in the struct hotplug_slot_info
46 * will be used when this value is requested by a user. 46 * will be used when this value is requested by a user.
@@ -191,4 +191,3 @@ static inline int pci_get_hp_params(struct pci_dev *dev,
191 191
192void pci_configure_slot(struct pci_dev *dev); 192void pci_configure_slot(struct pci_dev *dev);
193#endif 193#endif
194
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h
index 9572669eea97..4f1089f2cc98 100644
--- a/include/linux/pcieport_if.h
+++ b/include/linux/pcieport_if.h
@@ -23,7 +23,7 @@
23#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT) 23#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT)
24 24
25struct pcie_device { 25struct pcie_device {
26 int irq; /* Service IRQ/MSI/MSI-X Vector */ 26 int irq; /* Service IRQ/MSI/MSI-X Vector */
27 struct pci_dev *port; /* Root/Upstream/Downstream Port */ 27 struct pci_dev *port; /* Root/Upstream/Downstream Port */
28 u32 service; /* Port service this device represents */ 28 u32 service; /* Port service this device represents */
29 void *priv_data; /* Service Private Data */ 29 void *priv_data; /* Service Private Data */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 64ab823f7b74..48a4dc3cb8cf 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -559,6 +559,7 @@ static inline int phy_read_status(struct phy_device *phydev) {
559 return phydev->drv->read_status(phydev); 559 return phydev->drv->read_status(phydev);
560} 560}
561 561
562int genphy_setup_forced(struct phy_device *phydev);
562int genphy_restart_aneg(struct phy_device *phydev); 563int genphy_restart_aneg(struct phy_device *phydev);
563int genphy_config_aneg(struct phy_device *phydev); 564int genphy_config_aneg(struct phy_device *phydev);
564int genphy_update_link(struct phy_device *phydev); 565int genphy_update_link(struct phy_device *phydev);
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
index 179fb91bb5f2..f50821cb64be 100644
--- a/include/linux/platform_data/edma.h
+++ b/include/linux/platform_data/edma.h
@@ -67,10 +67,10 @@ struct edmacc_param {
67#define ITCCHEN BIT(23) 67#define ITCCHEN BIT(23)
68 68
69/*ch_status paramater of callback function possible values*/ 69/*ch_status paramater of callback function possible values*/
70#define DMA_COMPLETE 1 70#define EDMA_DMA_COMPLETE 1
71#define DMA_CC_ERROR 2 71#define EDMA_DMA_CC_ERROR 2
72#define DMA_TC1_ERROR 3 72#define EDMA_DMA_TC1_ERROR 3
73#define DMA_TC2_ERROR 4 73#define EDMA_DMA_TC2_ERROR 4
74 74
75enum address_mode { 75enum address_mode {
76 INCR = 0, 76 INCR = 0,
diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
index 931bc616219f..d169820203dd 100644
--- a/include/linux/preempt_mask.h
+++ b/include/linux/preempt_mask.h
@@ -11,36 +11,23 @@
11 * - bits 0-7 are the preemption count (max preemption depth: 256) 11 * - bits 0-7 are the preemption count (max preemption depth: 256)
12 * - bits 8-15 are the softirq count (max # of softirqs: 256) 12 * - bits 8-15 are the softirq count (max # of softirqs: 256)
13 * 13 *
14 * The hardirq count can in theory reach the same as NR_IRQS. 14 * The hardirq count could in theory be the same as the number of
15 * In reality, the number of nested IRQS is limited to the stack 15 * interrupts in the system, but we run all interrupt handlers with
16 * size as well. For archs with over 1000 IRQS it is not practical 16 * interrupts disabled, so we cannot have nesting interrupts. Though
17 * to expect that they will all nest. We give a max of 10 bits for 17 * there are a few palaeontologic drivers which reenable interrupts in
18 * hardirq nesting. An arch may choose to give less than 10 bits. 18 * the handler, so we need more than one bit here.
19 * m68k expects it to be 8.
20 * 19 *
21 * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024) 20 * PREEMPT_MASK: 0x000000ff
22 * - bit 26 is the NMI_MASK 21 * SOFTIRQ_MASK: 0x0000ff00
23 * - bit 27 is the PREEMPT_ACTIVE flag 22 * HARDIRQ_MASK: 0x000f0000
24 * 23 * NMI_MASK: 0x00100000
25 * PREEMPT_MASK: 0x000000ff 24 * PREEMPT_ACTIVE: 0x00200000
26 * SOFTIRQ_MASK: 0x0000ff00
27 * HARDIRQ_MASK: 0x03ff0000
28 * NMI_MASK: 0x04000000
29 */ 25 */
30#define PREEMPT_BITS 8 26#define PREEMPT_BITS 8
31#define SOFTIRQ_BITS 8 27#define SOFTIRQ_BITS 8
28#define HARDIRQ_BITS 4
32#define NMI_BITS 1 29#define NMI_BITS 1
33 30
34#define MAX_HARDIRQ_BITS 10
35
36#ifndef HARDIRQ_BITS
37# define HARDIRQ_BITS MAX_HARDIRQ_BITS
38#endif
39
40#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
41#error HARDIRQ_BITS too high!
42#endif
43
44#define PREEMPT_SHIFT 0 31#define PREEMPT_SHIFT 0
45#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) 32#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
46#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) 33#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
@@ -60,15 +47,9 @@
60 47
61#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) 48#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
62 49
63#ifndef PREEMPT_ACTIVE
64#define PREEMPT_ACTIVE_BITS 1 50#define PREEMPT_ACTIVE_BITS 1
65#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) 51#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
66#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT) 52#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
67#endif
68
69#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
70#error PREEMPT_ACTIVE is too low!
71#endif
72 53
73#define hardirq_count() (preempt_count() & HARDIRQ_MASK) 54#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
74#define softirq_count() (preempt_count() & SOFTIRQ_MASK) 55#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6f7ffa460089..768b037dfacb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -22,7 +22,7 @@ struct sched_param {
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/nodemask.h> 23#include <linux/nodemask.h>
24#include <linux/mm_types.h> 24#include <linux/mm_types.h>
25#include <linux/preempt.h> 25#include <linux/preempt_mask.h>
26 26
27#include <asm/page.h> 27#include <asm/page.h>
28#include <asm/ptrace.h> 28#include <asm/ptrace.h>
@@ -831,8 +831,6 @@ struct sched_domain {
831 unsigned int balance_interval; /* initialise to 1. units in ms. */ 831 unsigned int balance_interval; /* initialise to 1. units in ms. */
832 unsigned int nr_balance_failed; /* initialise to 0 */ 832 unsigned int nr_balance_failed; /* initialise to 0 */
833 833
834 u64 last_update;
835
836 /* idle_balance() stats */ 834 /* idle_balance() stats */
837 u64 max_newidle_lb_cost; 835 u64 max_newidle_lb_cost;
838 unsigned long next_decay_max_lb_cost; 836 unsigned long next_decay_max_lb_cost;
diff --git a/include/linux/security.h b/include/linux/security.h
index 9d37e2b9d3ec..5623a7f965b7 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1052,17 +1052,25 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
1052 * @xfrm_policy_delete_security: 1052 * @xfrm_policy_delete_security:
1053 * @ctx contains the xfrm_sec_ctx. 1053 * @ctx contains the xfrm_sec_ctx.
1054 * Authorize deletion of xp->security. 1054 * Authorize deletion of xp->security.
1055 * @xfrm_state_alloc_security: 1055 * @xfrm_state_alloc:
1056 * @x contains the xfrm_state being added to the Security Association 1056 * @x contains the xfrm_state being added to the Security Association
1057 * Database by the XFRM system. 1057 * Database by the XFRM system.
1058 * @sec_ctx contains the security context information being provided by 1058 * @sec_ctx contains the security context information being provided by
1059 * the user-level SA generation program (e.g., setkey or racoon). 1059 * the user-level SA generation program (e.g., setkey or racoon).
1060 * @secid contains the secid from which to take the mls portion of the context.
1061 * Allocate a security structure to the x->security field; the security 1060 * Allocate a security structure to the x->security field; the security
1062 * field is initialized to NULL when the xfrm_state is allocated. Set the 1061 * field is initialized to NULL when the xfrm_state is allocated. Set the
1063 * context to correspond to either sec_ctx or polsec, with the mls portion 1062 * context to correspond to sec_ctx. Return 0 if operation was successful
1064 * taken from secid in the latter case. 1063 * (memory to allocate, legal context).
1065 * Return 0 if operation was successful (memory to allocate, legal context). 1064 * @xfrm_state_alloc_acquire:
1065 * @x contains the xfrm_state being added to the Security Association
1066 * Database by the XFRM system.
1067 * @polsec contains the policy's security context.
1068 * @secid contains the secid from which to take the mls portion of the
1069 * context.
1070 * Allocate a security structure to the x->security field; the security
1071 * field is initialized to NULL when the xfrm_state is allocated. Set the
1072 * context to correspond to secid. Return 0 if operation was successful
1073 * (memory to allocate, legal context).
1066 * @xfrm_state_free_security: 1074 * @xfrm_state_free_security:
1067 * @x contains the xfrm_state. 1075 * @x contains the xfrm_state.
1068 * Deallocate x->security. 1076 * Deallocate x->security.
@@ -1679,9 +1687,11 @@ struct security_operations {
1679 int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx); 1687 int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx);
1680 void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx); 1688 void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx);
1681 int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx); 1689 int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx);
1682 int (*xfrm_state_alloc_security) (struct xfrm_state *x, 1690 int (*xfrm_state_alloc) (struct xfrm_state *x,
1683 struct xfrm_user_sec_ctx *sec_ctx, 1691 struct xfrm_user_sec_ctx *sec_ctx);
1684 u32 secid); 1692 int (*xfrm_state_alloc_acquire) (struct xfrm_state *x,
1693 struct xfrm_sec_ctx *polsec,
1694 u32 secid);
1685 void (*xfrm_state_free_security) (struct xfrm_state *x); 1695 void (*xfrm_state_free_security) (struct xfrm_state *x);
1686 int (*xfrm_state_delete_security) (struct xfrm_state *x); 1696 int (*xfrm_state_delete_security) (struct xfrm_state *x);
1687 int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); 1697 int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 1e8a8b6e837d..cf87a24c0f92 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -354,6 +354,35 @@ static inline void read_sequnlock_excl(seqlock_t *sl)
354 spin_unlock(&sl->lock); 354 spin_unlock(&sl->lock);
355} 355}
356 356
357/**
358 * read_seqbegin_or_lock - begin a sequence number check or locking block
359 * @lock: sequence lock
360 * @seq : sequence number to be checked
361 *
362 * First try it once optimistically without taking the lock. If that fails,
363 * take the lock. The sequence number is also used as a marker for deciding
364 * whether to be a reader (even) or writer (odd).
365 * N.B. seq must be initialized to an even number to begin with.
366 */
367static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
368{
369 if (!(*seq & 1)) /* Even */
370 *seq = read_seqbegin(lock);
371 else /* Odd */
372 read_seqlock_excl(lock);
373}
374
375static inline int need_seqretry(seqlock_t *lock, int seq)
376{
377 return !(seq & 1) && read_seqretry(lock, seq);
378}
379
380static inline void done_seqretry(seqlock_t *lock, int seq)
381{
382 if (seq & 1)
383 read_sequnlock_excl(lock);
384}
385
357static inline void read_seqlock_excl_bh(seqlock_t *sl) 386static inline void read_seqlock_excl_bh(seqlock_t *sl)
358{ 387{
359 spin_lock_bh(&sl->lock); 388 spin_lock_bh(&sl->lock);
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 30aa0dc60d75..9d55438bc4ad 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -47,6 +47,8 @@ extern int shmem_init(void);
47extern int shmem_fill_super(struct super_block *sb, void *data, int silent); 47extern int shmem_fill_super(struct super_block *sb, void *data, int silent);
48extern struct file *shmem_file_setup(const char *name, 48extern struct file *shmem_file_setup(const char *name,
49 loff_t size, unsigned long flags); 49 loff_t size, unsigned long flags);
50extern struct file *shmem_kernel_file_setup(const char *name, loff_t size,
51 unsigned long flags);
50extern int shmem_zero_setup(struct vm_area_struct *); 52extern int shmem_zero_setup(struct vm_area_struct *);
51extern int shmem_lock(struct file *file, int lock, struct user_struct *user); 53extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
52extern void shmem_unlock_mapping(struct address_space *mapping); 54extern void shmem_unlock_mapping(struct address_space *mapping);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 74f105847d13..1e2f4fe12773 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -53,7 +53,14 @@
53 * } 53 * }
54 * rcu_read_unlock(); 54 * rcu_read_unlock();
55 * 55 *
56 * See also the comment on struct slab_rcu in mm/slab.c. 56 * This is useful if we need to approach a kernel structure obliquely,
57 * from its address obtained without the usual locking. We can lock
58 * the structure to stabilize it and check it's still at the given address,
59 * only if we can be sure that the memory has not been meanwhile reused
60 * for some other kind of object (which our subsystem's lock might corrupt).
61 *
62 * rcu_read_lock before reading the address, then rcu_read_unlock after
63 * taking the spinlock within the structure expected at that address.
57 */ 64 */
58#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ 65#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
59#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ 66#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
@@ -381,10 +388,55 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
381/** 388/**
382 * kmalloc - allocate memory 389 * kmalloc - allocate memory
383 * @size: how many bytes of memory are required. 390 * @size: how many bytes of memory are required.
384 * @flags: the type of memory to allocate (see kcalloc). 391 * @flags: the type of memory to allocate.
385 * 392 *
386 * kmalloc is the normal method of allocating memory 393 * kmalloc is the normal method of allocating memory
387 * for objects smaller than page size in the kernel. 394 * for objects smaller than page size in the kernel.
395 *
396 * The @flags argument may be one of:
397 *
398 * %GFP_USER - Allocate memory on behalf of user. May sleep.
399 *
400 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
401 *
402 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
403 * For example, use this inside interrupt handlers.
404 *
405 * %GFP_HIGHUSER - Allocate pages from high memory.
406 *
407 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
408 *
409 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
410 *
411 * %GFP_NOWAIT - Allocation will not sleep.
412 *
413 * %GFP_THISNODE - Allocate node-local memory only.
414 *
415 * %GFP_DMA - Allocation suitable for DMA.
416 * Should only be used for kmalloc() caches. Otherwise, use a
417 * slab created with SLAB_DMA.
418 *
419 * Also it is possible to set different flags by OR'ing
420 * in one or more of the following additional @flags:
421 *
422 * %__GFP_COLD - Request cache-cold pages instead of
423 * trying to return cache-warm pages.
424 *
425 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
426 *
427 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
428 * (think twice before using).
429 *
430 * %__GFP_NORETRY - If memory is not immediately available,
431 * then give up at once.
432 *
433 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
434 *
435 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
436 *
437 * There are other flags available as well, but these are not intended
438 * for general use, and so are not documented here. For a full list of
439 * potential flags, always refer to linux/gfp.h.
388 */ 440 */
389static __always_inline void *kmalloc(size_t size, gfp_t flags) 441static __always_inline void *kmalloc(size_t size, gfp_t flags)
390{ 442{
@@ -495,61 +547,6 @@ int cache_show(struct kmem_cache *s, struct seq_file *m);
495void print_slabinfo_header(struct seq_file *m); 547void print_slabinfo_header(struct seq_file *m);
496 548
497/** 549/**
498 * kmalloc - allocate memory
499 * @size: how many bytes of memory are required.
500 * @flags: the type of memory to allocate.
501 *
502 * The @flags argument may be one of:
503 *
504 * %GFP_USER - Allocate memory on behalf of user. May sleep.
505 *
506 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
507 *
508 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
509 * For example, use this inside interrupt handlers.
510 *
511 * %GFP_HIGHUSER - Allocate pages from high memory.
512 *
513 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
514 *
515 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
516 *
517 * %GFP_NOWAIT - Allocation will not sleep.
518 *
519 * %GFP_THISNODE - Allocate node-local memory only.
520 *
521 * %GFP_DMA - Allocation suitable for DMA.
522 * Should only be used for kmalloc() caches. Otherwise, use a
523 * slab created with SLAB_DMA.
524 *
525 * Also it is possible to set different flags by OR'ing
526 * in one or more of the following additional @flags:
527 *
528 * %__GFP_COLD - Request cache-cold pages instead of
529 * trying to return cache-warm pages.
530 *
531 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
532 *
533 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
534 * (think twice before using).
535 *
536 * %__GFP_NORETRY - If memory is not immediately available,
537 * then give up at once.
538 *
539 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
540 *
541 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
542 *
543 * There are other flags available as well, but these are not intended
544 * for general use, and so are not documented here. For a full list of
545 * potential flags, always refer to linux/gfp.h.
546 *
547 * kmalloc is the normal method of allocating memory
548 * in the kernel.
549 */
550static __always_inline void *kmalloc(size_t size, gfp_t flags);
551
552/**
553 * kmalloc_array - allocate memory for an array. 550 * kmalloc_array - allocate memory for an array.
554 * @n: number of elements. 551 * @n: number of elements.
555 * @size: element size. 552 * @size: element size.
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index e9346b4f1ef4..09bfffb08a56 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -27,8 +27,8 @@ struct kmem_cache {
27 27
28 size_t colour; /* cache colouring range */ 28 size_t colour; /* cache colouring range */
29 unsigned int colour_off; /* colour offset */ 29 unsigned int colour_off; /* colour offset */
30 struct kmem_cache *slabp_cache; 30 struct kmem_cache *freelist_cache;
31 unsigned int slab_size; 31 unsigned int freelist_size;
32 32
33 /* constructor func */ 33 /* constructor func */
34 void (*ctor)(void *obj); 34 void (*ctor)(void *obj);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index cc0b67eada42..f56bfa9e4526 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -11,7 +11,7 @@
11enum stat_item { 11enum stat_item {
12 ALLOC_FASTPATH, /* Allocation from cpu slab */ 12 ALLOC_FASTPATH, /* Allocation from cpu slab */
13 ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */ 13 ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */
14 FREE_FASTPATH, /* Free to cpu slub */ 14 FREE_FASTPATH, /* Free to cpu slab */
15 FREE_SLOWPATH, /* Freeing not to cpu slab */ 15 FREE_SLOWPATH, /* Freeing not to cpu slab */
16 FREE_FROZEN, /* Freeing to frozen slab */ 16 FREE_FROZEN, /* Freeing to frozen slab */
17 FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */ 17 FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */
diff --git a/include/linux/tegra-powergate.h b/include/linux/tegra-powergate.h
index c98cfa406952..fd4498329c7c 100644
--- a/include/linux/tegra-powergate.h
+++ b/include/linux/tegra-powergate.h
@@ -45,6 +45,7 @@ struct clk;
45 45
46#define TEGRA_POWERGATE_3D0 TEGRA_POWERGATE_3D 46#define TEGRA_POWERGATE_3D0 TEGRA_POWERGATE_3D
47 47
48#ifdef CONFIG_ARCH_TEGRA
48int tegra_powergate_is_powered(int id); 49int tegra_powergate_is_powered(int id);
49int tegra_powergate_power_on(int id); 50int tegra_powergate_power_on(int id);
50int tegra_powergate_power_off(int id); 51int tegra_powergate_power_off(int id);
@@ -52,5 +53,31 @@ int tegra_powergate_remove_clamping(int id);
52 53
53/* Must be called with clk disabled, and returns with clk enabled */ 54/* Must be called with clk disabled, and returns with clk enabled */
54int tegra_powergate_sequence_power_up(int id, struct clk *clk); 55int tegra_powergate_sequence_power_up(int id, struct clk *clk);
56#else
57static inline int tegra_powergate_is_powered(int id)
58{
59 return -ENOSYS;
60}
61
62static inline int tegra_powergate_power_on(int id)
63{
64 return -ENOSYS;
65}
66
67static inline int tegra_powergate_power_off(int id)
68{
69 return -ENOSYS;
70}
71
72static inline int tegra_powergate_remove_clamping(int id)
73{
74 return -ENOSYS;
75}
76
77static inline int tegra_powergate_sequence_power_up(int id, struct clk *clk)
78{
79 return -ENOSYS;
80}
81#endif
55 82
56#endif /* _MACH_TEGRA_POWERGATE_H_ */ 83#endif /* _MACH_TEGRA_POWERGATE_H_ */
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 7454865ad148..512ab162832c 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1264,6 +1264,8 @@ typedef void (*usb_complete_t)(struct urb *);
1264 * @sg: scatter gather buffer list, the buffer size of each element in 1264 * @sg: scatter gather buffer list, the buffer size of each element in
1265 * the list (except the last) must be divisible by the endpoint's 1265 * the list (except the last) must be divisible by the endpoint's
1266 * max packet size if no_sg_constraint isn't set in 'struct usb_bus' 1266 * max packet size if no_sg_constraint isn't set in 'struct usb_bus'
1267 * (FIXME: scatter-gather under xHCI is broken for periodic transfers.
1268 * Do not use urb->sg for interrupt endpoints for now, only bulk.)
1267 * @num_mapped_sgs: (internal) number of mapped sg entries 1269 * @num_mapped_sgs: (internal) number of mapped sg entries
1268 * @num_sgs: number of entries in the sg list 1270 * @num_sgs: number of entries in the sg list
1269 * @transfer_buffer_length: How big is transfer_buffer. The transfer may 1271 * @transfer_buffer_length: How big is transfer_buffer. The transfer may
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h
index 0c4d4ca370ec..eeb28329fa3c 100644
--- a/include/linux/usb/wusb.h
+++ b/include/linux/usb/wusb.h
@@ -271,6 +271,8 @@ static inline u8 wusb_key_index(int index, int type, int originator)
271#define WUSB_KEY_INDEX_TYPE_GTK 2 271#define WUSB_KEY_INDEX_TYPE_GTK 2
272#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0 272#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0
273#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1 273#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1
274/* bits 0-3 used for the key index. */
275#define WUSB_KEY_INDEX_MAX 15
274 276
275/* A CCM Nonce, defined in WUSB1.0[6.4.1] */ 277/* A CCM Nonce, defined in WUSB1.0[6.4.1] */
276struct aes_ccm_nonce { 278struct aes_ccm_nonce {
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 4db29859464f..4836ba3c1cd8 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -27,6 +27,12 @@ struct user_namespace {
27 kuid_t owner; 27 kuid_t owner;
28 kgid_t group; 28 kgid_t group;
29 unsigned int proc_inum; 29 unsigned int proc_inum;
30
31 /* Register of per-UID persistent keyrings for this namespace */
32#ifdef CONFIG_PERSISTENT_KEYRINGS
33 struct key *persistent_keyring_register;
34 struct rw_semaphore persistent_keyring_register_sem;
35#endif
30}; 36};
31 37
32extern struct user_namespace init_user_ns; 38extern struct user_namespace init_user_ns;
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 61939ba30aa0..eaa00b10abaa 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -278,6 +278,31 @@ do { \
278 __ret; \ 278 __ret; \
279}) 279})
280 280
281#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
282 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
283 cmd1; schedule(); cmd2)
284
285/**
286 * wait_event_cmd - sleep until a condition gets true
287 * @wq: the waitqueue to wait on
288 * @condition: a C expression for the event to wait for
289 * cmd1: the command will be executed before sleep
290 * cmd2: the command will be executed after sleep
291 *
292 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
293 * @condition evaluates to true. The @condition is checked each time
294 * the waitqueue @wq is woken up.
295 *
296 * wake_up() has to be called after changing any variable that could
297 * change the result of the wait condition.
298 */
299#define wait_event_cmd(wq, condition, cmd1, cmd2) \
300do { \
301 if (condition) \
302 break; \
303 __wait_event_cmd(wq, condition, cmd1, cmd2); \
304} while (0)
305
281#define __wait_event_interruptible(wq, condition) \ 306#define __wait_event_interruptible(wq, condition) \
282 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \ 307 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
283 schedule()) 308 schedule())
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index bd8218b15009..941055e9d125 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -83,7 +83,7 @@ struct vb2_fileio_data;
83struct vb2_mem_ops { 83struct vb2_mem_ops {
84 void *(*alloc)(void *alloc_ctx, unsigned long size, gfp_t gfp_flags); 84 void *(*alloc)(void *alloc_ctx, unsigned long size, gfp_t gfp_flags);
85 void (*put)(void *buf_priv); 85 void (*put)(void *buf_priv);
86 struct dma_buf *(*get_dmabuf)(void *buf_priv); 86 struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags);
87 87
88 void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr, 88 void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr,
89 unsigned long size, int write); 89 unsigned long size, int write);
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 9b787b62cf16..1b177ed803b7 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -10,16 +10,9 @@
10/** 10/**
11 * struct genl_multicast_group - generic netlink multicast group 11 * struct genl_multicast_group - generic netlink multicast group
12 * @name: name of the multicast group, names are per-family 12 * @name: name of the multicast group, names are per-family
13 * @id: multicast group ID, assigned by the core, to use with
14 * genlmsg_multicast().
15 * @list: list entry for linking
16 * @family: pointer to family, need not be set before registering
17 */ 13 */
18struct genl_multicast_group { 14struct genl_multicast_group {
19 struct genl_family *family; /* private */
20 struct list_head list; /* private */
21 char name[GENL_NAMSIZ]; 15 char name[GENL_NAMSIZ];
22 u32 id;
23}; 16};
24 17
25struct genl_ops; 18struct genl_ops;
@@ -39,9 +32,12 @@ struct genl_info;
39 * @post_doit: called after an operation's doit callback, it may 32 * @post_doit: called after an operation's doit callback, it may
40 * undo operations done by pre_doit, for example release locks 33 * undo operations done by pre_doit, for example release locks
41 * @attrbuf: buffer to store parsed attributes 34 * @attrbuf: buffer to store parsed attributes
42 * @ops_list: list of all assigned operations
43 * @family_list: family list 35 * @family_list: family list
44 * @mcast_groups: multicast groups list 36 * @mcgrps: multicast groups used by this family (private)
37 * @n_mcgrps: number of multicast groups (private)
38 * @mcgrp_offset: starting number of multicast group IDs in this family
39 * @ops: the operations supported by this family (private)
40 * @n_ops: number of operations supported by this family (private)
45 */ 41 */
46struct genl_family { 42struct genl_family {
47 unsigned int id; 43 unsigned int id;
@@ -51,16 +47,19 @@ struct genl_family {
51 unsigned int maxattr; 47 unsigned int maxattr;
52 bool netnsok; 48 bool netnsok;
53 bool parallel_ops; 49 bool parallel_ops;
54 int (*pre_doit)(struct genl_ops *ops, 50 int (*pre_doit)(const struct genl_ops *ops,
55 struct sk_buff *skb, 51 struct sk_buff *skb,
56 struct genl_info *info); 52 struct genl_info *info);
57 void (*post_doit)(struct genl_ops *ops, 53 void (*post_doit)(const struct genl_ops *ops,
58 struct sk_buff *skb, 54 struct sk_buff *skb,
59 struct genl_info *info); 55 struct genl_info *info);
60 struct nlattr ** attrbuf; /* private */ 56 struct nlattr ** attrbuf; /* private */
61 struct list_head ops_list; /* private */ 57 const struct genl_ops * ops; /* private */
58 const struct genl_multicast_group *mcgrps; /* private */
59 unsigned int n_ops; /* private */
60 unsigned int n_mcgrps; /* private */
61 unsigned int mcgrp_offset; /* private */
62 struct list_head family_list; /* private */ 62 struct list_head family_list; /* private */
63 struct list_head mcast_groups; /* private */
64 struct module *module; 63 struct module *module;
65}; 64};
66 65
@@ -110,16 +109,15 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net)
110 * @ops_list: operations list 109 * @ops_list: operations list
111 */ 110 */
112struct genl_ops { 111struct genl_ops {
113 u8 cmd;
114 u8 internal_flags;
115 unsigned int flags;
116 const struct nla_policy *policy; 112 const struct nla_policy *policy;
117 int (*doit)(struct sk_buff *skb, 113 int (*doit)(struct sk_buff *skb,
118 struct genl_info *info); 114 struct genl_info *info);
119 int (*dumpit)(struct sk_buff *skb, 115 int (*dumpit)(struct sk_buff *skb,
120 struct netlink_callback *cb); 116 struct netlink_callback *cb);
121 int (*done)(struct netlink_callback *cb); 117 int (*done)(struct netlink_callback *cb);
122 struct list_head ops_list; 118 u8 cmd;
119 u8 internal_flags;
120 u8 flags;
123}; 121};
124 122
125int __genl_register_family(struct genl_family *family); 123int __genl_register_family(struct genl_family *family);
@@ -130,24 +128,53 @@ static inline int genl_register_family(struct genl_family *family)
130 return __genl_register_family(family); 128 return __genl_register_family(family);
131} 129}
132 130
133int __genl_register_family_with_ops(struct genl_family *family, 131/**
134 struct genl_ops *ops, size_t n_ops); 132 * genl_register_family_with_ops - register a generic netlink family with ops
135 133 * @family: generic netlink family
136static inline int genl_register_family_with_ops(struct genl_family *family, 134 * @ops: operations to be registered
137 struct genl_ops *ops, size_t n_ops) 135 * @n_ops: number of elements to register
136 *
137 * Registers the specified family and operations from the specified table.
138 * Only one family may be registered with the same family name or identifier.
139 *
140 * The family id may equal GENL_ID_GENERATE causing an unique id to
141 * be automatically generated and assigned.
142 *
143 * Either a doit or dumpit callback must be specified for every registered
144 * operation or the function will fail. Only one operation structure per
145 * command identifier may be registered.
146 *
147 * See include/net/genetlink.h for more documenation on the operations
148 * structure.
149 *
150 * Return 0 on success or a negative error code.
151 */
152static inline int
153_genl_register_family_with_ops_grps(struct genl_family *family,
154 const struct genl_ops *ops, size_t n_ops,
155 const struct genl_multicast_group *mcgrps,
156 size_t n_mcgrps)
138{ 157{
139 family->module = THIS_MODULE; 158 family->module = THIS_MODULE;
140 return __genl_register_family_with_ops(family, ops, n_ops); 159 family->ops = ops;
160 family->n_ops = n_ops;
161 family->mcgrps = mcgrps;
162 family->n_mcgrps = n_mcgrps;
163 return __genl_register_family(family);
141} 164}
142 165
166#define genl_register_family_with_ops(family, ops) \
167 _genl_register_family_with_ops_grps((family), \
168 (ops), ARRAY_SIZE(ops), \
169 NULL, 0)
170#define genl_register_family_with_ops_groups(family, ops, grps) \
171 _genl_register_family_with_ops_grps((family), \
172 (ops), ARRAY_SIZE(ops), \
173 (grps), ARRAY_SIZE(grps))
174
143int genl_unregister_family(struct genl_family *family); 175int genl_unregister_family(struct genl_family *family);
144int genl_register_ops(struct genl_family *, struct genl_ops *ops); 176void genl_notify(struct genl_family *family,
145int genl_unregister_ops(struct genl_family *, struct genl_ops *ops); 177 struct sk_buff *skb, struct net *net, u32 portid,
146int genl_register_mc_group(struct genl_family *family,
147 struct genl_multicast_group *grp);
148void genl_unregister_mc_group(struct genl_family *family,
149 struct genl_multicast_group *grp);
150void genl_notify(struct sk_buff *skb, struct net *net, u32 portid,
151 u32 group, struct nlmsghdr *nlh, gfp_t flags); 178 u32 group, struct nlmsghdr *nlh, gfp_t flags);
152 179
153void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, 180void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
@@ -227,41 +254,51 @@ static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr)
227 254
228/** 255/**
229 * genlmsg_multicast_netns - multicast a netlink message to a specific netns 256 * genlmsg_multicast_netns - multicast a netlink message to a specific netns
257 * @family: the generic netlink family
230 * @net: the net namespace 258 * @net: the net namespace
231 * @skb: netlink message as socket buffer 259 * @skb: netlink message as socket buffer
232 * @portid: own netlink portid to avoid sending to yourself 260 * @portid: own netlink portid to avoid sending to yourself
233 * @group: multicast group id 261 * @group: offset of multicast group in groups array
234 * @flags: allocation flags 262 * @flags: allocation flags
235 */ 263 */
236static inline int genlmsg_multicast_netns(struct net *net, struct sk_buff *skb, 264static inline int genlmsg_multicast_netns(struct genl_family *family,
265 struct net *net, struct sk_buff *skb,
237 u32 portid, unsigned int group, gfp_t flags) 266 u32 portid, unsigned int group, gfp_t flags)
238{ 267{
268 if (WARN_ON_ONCE(group >= family->n_mcgrps))
269 return -EINVAL;
270 group = family->mcgrp_offset + group;
239 return nlmsg_multicast(net->genl_sock, skb, portid, group, flags); 271 return nlmsg_multicast(net->genl_sock, skb, portid, group, flags);
240} 272}
241 273
242/** 274/**
243 * genlmsg_multicast - multicast a netlink message to the default netns 275 * genlmsg_multicast - multicast a netlink message to the default netns
276 * @family: the generic netlink family
244 * @skb: netlink message as socket buffer 277 * @skb: netlink message as socket buffer
245 * @portid: own netlink portid to avoid sending to yourself 278 * @portid: own netlink portid to avoid sending to yourself
246 * @group: multicast group id 279 * @group: offset of multicast group in groups array
247 * @flags: allocation flags 280 * @flags: allocation flags
248 */ 281 */
249static inline int genlmsg_multicast(struct sk_buff *skb, u32 portid, 282static inline int genlmsg_multicast(struct genl_family *family,
283 struct sk_buff *skb, u32 portid,
250 unsigned int group, gfp_t flags) 284 unsigned int group, gfp_t flags)
251{ 285{
252 return genlmsg_multicast_netns(&init_net, skb, portid, group, flags); 286 return genlmsg_multicast_netns(family, &init_net, skb,
287 portid, group, flags);
253} 288}
254 289
255/** 290/**
256 * genlmsg_multicast_allns - multicast a netlink message to all net namespaces 291 * genlmsg_multicast_allns - multicast a netlink message to all net namespaces
292 * @family: the generic netlink family
257 * @skb: netlink message as socket buffer 293 * @skb: netlink message as socket buffer
258 * @portid: own netlink portid to avoid sending to yourself 294 * @portid: own netlink portid to avoid sending to yourself
259 * @group: multicast group id 295 * @group: offset of multicast group in groups array
260 * @flags: allocation flags 296 * @flags: allocation flags
261 * 297 *
262 * This function must hold the RTNL or rcu_read_lock(). 298 * This function must hold the RTNL or rcu_read_lock().
263 */ 299 */
264int genlmsg_multicast_allns(struct sk_buff *skb, u32 portid, 300int genlmsg_multicast_allns(struct genl_family *family,
301 struct sk_buff *skb, u32 portid,
265 unsigned int group, gfp_t flags); 302 unsigned int group, gfp_t flags);
266 303
267/** 304/**
@@ -332,5 +369,25 @@ static inline struct sk_buff *genlmsg_new(size_t payload, gfp_t flags)
332 return nlmsg_new(genlmsg_total_size(payload), flags); 369 return nlmsg_new(genlmsg_total_size(payload), flags);
333} 370}
334 371
372/**
373 * genl_set_err - report error to genetlink broadcast listeners
374 * @family: the generic netlink family
375 * @net: the network namespace to report the error to
376 * @portid: the PORTID of a process that we want to skip (if any)
377 * @group: the broadcast group that will notice the error
378 * (this is the offset of the multicast group in the groups array)
379 * @code: error code, must be negative (as usual in kernelspace)
380 *
381 * This function returns the number of broadcast listeners that have set the
382 * NETLINK_RECV_NO_ENOBUFS socket option.
383 */
384static inline int genl_set_err(struct genl_family *family, struct net *net,
385 u32 portid, u32 group, int code)
386{
387 if (WARN_ON_ONCE(group >= family->n_mcgrps))
388 return -EINVAL;
389 group = family->mcgrp_offset + group;
390 return netlink_set_err(net->genl_sock, portid, group, code);
391}
335 392
336#endif /* __NET_GENERIC_NETLINK_H */ 393#endif /* __NET_GENERIC_NETLINK_H */
diff --git a/include/net/ip.h b/include/net/ip.h
index 217bc5bfc6c6..5a25f36fe3a7 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -473,7 +473,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
473int ip_ra_control(struct sock *sk, unsigned char on, 473int ip_ra_control(struct sock *sk, unsigned char on,
474 void (*destructor)(struct sock *)); 474 void (*destructor)(struct sock *));
475 475
476int ip_recv_error(struct sock *sk, struct msghdr *msg, int len); 476int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
477void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, 477void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
478 u32 info, u8 *payload); 478 u32 info, u8 *payload);
479void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport, 479void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 2a5f668cd683..488316e339a1 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -110,7 +110,8 @@ struct frag_hdr {
110 __be32 identification; 110 __be32 identification;
111}; 111};
112 112
113#define IP6_MF 0x0001 113#define IP6_MF 0x0001
114#define IP6_OFFSET 0xFFF8
114 115
115#include <net/sock.h> 116#include <net/sock.h>
116 117
@@ -776,8 +777,10 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
776 777
777int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); 778int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
778 779
779int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len); 780int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
780int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len); 781 int *addr_len);
782int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
783 int *addr_len);
781void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, 784void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
782 u32 info, u8 *payload); 785 u32 info, u8 *payload);
783void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info); 786void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
diff --git a/include/net/ping.h b/include/net/ping.h
index 3f67704f3747..90f48417b03d 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -31,7 +31,8 @@
31 31
32/* Compatibility glue so we can support IPv6 when it's compiled as a module */ 32/* Compatibility glue so we can support IPv6 when it's compiled as a module */
33struct pingv6_ops { 33struct pingv6_ops {
34 int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len); 34 int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len,
35 int *addr_len);
35 int (*ip6_datagram_recv_ctl)(struct sock *sk, struct msghdr *msg, 36 int (*ip6_datagram_recv_ctl)(struct sock *sk, struct msghdr *msg,
36 struct sk_buff *skb); 37 struct sk_buff *skb);
37 int (*icmpv6_err_convert)(u8 type, u8 code, int *err); 38 int (*icmpv6_err_convert)(u8 type, u8 code, int *err);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 2174d8da0770..67b5d0068273 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -629,6 +629,7 @@ struct sctp_chunk {
629#define SCTP_NEED_FRTX 0x1 629#define SCTP_NEED_FRTX 0x1
630#define SCTP_DONT_FRTX 0x2 630#define SCTP_DONT_FRTX 0x2
631 __u16 rtt_in_progress:1, /* This chunk used for RTT calc? */ 631 __u16 rtt_in_progress:1, /* This chunk used for RTT calc? */
632 resent:1, /* Has this chunk ever been resent. */
632 has_tsn:1, /* Does this chunk have a TSN yet? */ 633 has_tsn:1, /* Does this chunk have a TSN yet? */
633 has_ssn:1, /* Does this chunk have a SSN yet? */ 634 has_ssn:1, /* Does this chunk have a SSN yet? */
634 singleton:1, /* Only chunk in the packet? */ 635 singleton:1, /* Only chunk in the packet? */
@@ -1725,12 +1726,6 @@ struct sctp_association {
1725 /* How many duplicated TSNs have we seen? */ 1726 /* How many duplicated TSNs have we seen? */
1726 int numduptsns; 1727 int numduptsns;
1727 1728
1728 /* Number of seconds of idle time before an association is closed.
1729 * In the association context, this is really used as a boolean
1730 * since the real timeout is stored in the timeouts array
1731 */
1732 __u32 autoclose;
1733
1734 /* These are to support 1729 /* These are to support
1735 * "SCTP Extensions for Dynamic Reconfiguration of IP Addresses 1730 * "SCTP Extensions for Dynamic Reconfiguration of IP Addresses
1736 * and Enforcement of Flow and Message Limits" 1731 * and Enforcement of Flow and Message Limits"
diff --git a/include/net/sock.h b/include/net/sock.h
index e3a18ff0c38b..2ef3c3eca47a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1035,7 +1035,6 @@ enum cg_proto_flags {
1035}; 1035};
1036 1036
1037struct cg_proto { 1037struct cg_proto {
1038 void (*enter_memory_pressure)(struct sock *sk);
1039 struct res_counter memory_allocated; /* Current allocated memory. */ 1038 struct res_counter memory_allocated; /* Current allocated memory. */
1040 struct percpu_counter sockets_allocated; /* Current number of sockets. */ 1039 struct percpu_counter sockets_allocated; /* Current number of sockets. */
1041 int memory_pressure; 1040 int memory_pressure;
@@ -1155,8 +1154,7 @@ static inline void sk_leave_memory_pressure(struct sock *sk)
1155 struct proto *prot = sk->sk_prot; 1154 struct proto *prot = sk->sk_prot;
1156 1155
1157 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) 1156 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
1158 if (cg_proto->memory_pressure) 1157 cg_proto->memory_pressure = 0;
1159 cg_proto->memory_pressure = 0;
1160 } 1158 }
1161 1159
1162} 1160}
@@ -1171,7 +1169,7 @@ static inline void sk_enter_memory_pressure(struct sock *sk)
1171 struct proto *prot = sk->sk_prot; 1169 struct proto *prot = sk->sk_prot;
1172 1170
1173 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) 1171 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
1174 cg_proto->enter_memory_pressure(sk); 1172 cg_proto->memory_pressure = 1;
1175 } 1173 }
1176 1174
1177 sk->sk_prot->enter_memory_pressure(sk); 1175 sk->sk_prot->enter_memory_pressure(sk);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 546084964d55..fe3b58e836c8 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -475,6 +475,9 @@ struct scsi_host_template {
475 */ 475 */
476 unsigned ordered_tag:1; 476 unsigned ordered_tag:1;
477 477
478 /* True if the controller does not support WRITE SAME */
479 unsigned no_write_same:1;
480
478 /* 481 /*
479 * Countdown for host blocking with no commands outstanding. 482 * Countdown for host blocking with no commands outstanding.
480 */ 483 */
@@ -677,6 +680,9 @@ struct Scsi_Host {
677 /* Don't resume host in EH */ 680 /* Don't resume host in EH */
678 unsigned eh_noresume:1; 681 unsigned eh_noresume:1;
679 682
683 /* The controller does not support WRITE SAME */
684 unsigned no_write_same:1;
685
680 /* 686 /*
681 * Optional work queue to be utilized by the transport 687 * Optional work queue to be utilized by the transport
682 */ 688 */
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index af9983970417..5f73785f5977 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -108,7 +108,7 @@ static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
108{ 108{
109 struct snd_sg_buf *sgbuf = dmab->private_data; 109 struct snd_sg_buf *sgbuf = dmab->private_data;
110 dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr; 110 dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
111 addr &= PAGE_MASK; 111 addr &= ~((dma_addr_t)PAGE_SIZE - 1);
112 return addr + offset % PAGE_SIZE; 112 return addr + offset % PAGE_SIZE;
113} 113}
114 114
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 2037c45adfe6..56ebdfca6273 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -104,7 +104,8 @@ struct device;
104 SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \ 104 SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
105 .kcontrol_news = wcontrols, .num_kcontrols = 1} 105 .kcontrol_news = wcontrols, .num_kcontrols = 1}
106#define SND_SOC_DAPM_MUX(wname, wreg, wshift, winvert, wcontrols) \ 106#define SND_SOC_DAPM_MUX(wname, wreg, wshift, winvert, wcontrols) \
107{ .id = snd_soc_dapm_mux, .name = wname, .reg = wreg, \ 107{ .id = snd_soc_dapm_mux, .name = wname, \
108 SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
108 .kcontrol_news = wcontrols, .num_kcontrols = 1} 109 .kcontrol_news = wcontrols, .num_kcontrols = 1}
109#define SND_SOC_DAPM_VIRT_MUX(wname, wreg, wshift, winvert, wcontrols) \ 110#define SND_SOC_DAPM_VIRT_MUX(wname, wreg, wshift, winvert, wcontrols) \
110{ .id = snd_soc_dapm_virt_mux, .name = wname, \ 111{ .id = snd_soc_dapm_virt_mux, .name = wname, \
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 5ebe21cd5d1c..39e0114d70c5 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -34,6 +34,11 @@ struct se_subsystem_api {
34 sense_reason_t (*parse_cdb)(struct se_cmd *cmd); 34 sense_reason_t (*parse_cdb)(struct se_cmd *cmd);
35 u32 (*get_device_type)(struct se_device *); 35 u32 (*get_device_type)(struct se_device *);
36 sector_t (*get_blocks)(struct se_device *); 36 sector_t (*get_blocks)(struct se_device *);
37 sector_t (*get_alignment_offset_lbas)(struct se_device *);
38 /* lbppbe = logical blocks per physical block exponent. see SBC-3 */
39 unsigned int (*get_lbppbe)(struct se_device *);
40 unsigned int (*get_io_min)(struct se_device *);
41 unsigned int (*get_io_opt)(struct se_device *);
37 unsigned char *(*get_sense_buffer)(struct se_cmd *); 42 unsigned char *(*get_sense_buffer)(struct se_cmd *);
38 bool (*get_write_cache)(struct se_device *); 43 bool (*get_write_cache)(struct se_device *);
39}; 44};
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 5bdb8b7d2a69..45412a6afa69 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -227,6 +227,7 @@ enum tcm_tmreq_table {
227 227
228/* fabric independent task management response values */ 228/* fabric independent task management response values */
229enum tcm_tmrsp_table { 229enum tcm_tmrsp_table {
230 TMR_FUNCTION_FAILED = 0,
230 TMR_FUNCTION_COMPLETE = 1, 231 TMR_FUNCTION_COMPLETE = 1,
231 TMR_TASK_DOES_NOT_EXIST = 2, 232 TMR_TASK_DOES_NOT_EXIST = 2,
232 TMR_LUN_DOES_NOT_EXIST = 3, 233 TMR_LUN_DOES_NOT_EXIST = 3,
@@ -282,11 +283,12 @@ struct t10_alua_lu_gp_member {
282struct t10_alua_tg_pt_gp { 283struct t10_alua_tg_pt_gp {
283 u16 tg_pt_gp_id; 284 u16 tg_pt_gp_id;
284 int tg_pt_gp_valid_id; 285 int tg_pt_gp_valid_id;
286 int tg_pt_gp_alua_supported_states;
285 int tg_pt_gp_alua_access_status; 287 int tg_pt_gp_alua_access_status;
286 int tg_pt_gp_alua_access_type; 288 int tg_pt_gp_alua_access_type;
287 int tg_pt_gp_nonop_delay_msecs; 289 int tg_pt_gp_nonop_delay_msecs;
288 int tg_pt_gp_trans_delay_msecs; 290 int tg_pt_gp_trans_delay_msecs;
289 int tg_pt_gp_implict_trans_secs; 291 int tg_pt_gp_implicit_trans_secs;
290 int tg_pt_gp_pref; 292 int tg_pt_gp_pref;
291 int tg_pt_gp_write_metadata; 293 int tg_pt_gp_write_metadata;
292 /* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */ 294 /* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
@@ -442,7 +444,6 @@ struct se_cmd {
442 /* Used for sense data */ 444 /* Used for sense data */
443 void *sense_buffer; 445 void *sense_buffer;
444 struct list_head se_delayed_node; 446 struct list_head se_delayed_node;
445 struct list_head se_lun_node;
446 struct list_head se_qf_node; 447 struct list_head se_qf_node;
447 struct se_device *se_dev; 448 struct se_device *se_dev;
448 struct se_dev_entry *se_deve; 449 struct se_dev_entry *se_deve;
@@ -470,15 +471,11 @@ struct se_cmd {
470#define CMD_T_SENT (1 << 4) 471#define CMD_T_SENT (1 << 4)
471#define CMD_T_STOP (1 << 5) 472#define CMD_T_STOP (1 << 5)
472#define CMD_T_FAILED (1 << 6) 473#define CMD_T_FAILED (1 << 6)
473#define CMD_T_LUN_STOP (1 << 7) 474#define CMD_T_DEV_ACTIVE (1 << 7)
474#define CMD_T_LUN_FE_STOP (1 << 8) 475#define CMD_T_REQUEST_STOP (1 << 8)
475#define CMD_T_DEV_ACTIVE (1 << 9) 476#define CMD_T_BUSY (1 << 9)
476#define CMD_T_REQUEST_STOP (1 << 10)
477#define CMD_T_BUSY (1 << 11)
478 spinlock_t t_state_lock; 477 spinlock_t t_state_lock;
479 struct completion t_transport_stop_comp; 478 struct completion t_transport_stop_comp;
480 struct completion transport_lun_fe_stop_comp;
481 struct completion transport_lun_stop_comp;
482 479
483 struct work_struct work; 480 struct work_struct work;
484 481
@@ -498,6 +495,9 @@ struct se_cmd {
498 495
499 /* backend private data */ 496 /* backend private data */
500 void *priv; 497 void *priv;
498
499 /* Used for lun->lun_ref counting */
500 bool lun_ref_active;
501}; 501};
502 502
503struct se_ua { 503struct se_ua {
@@ -628,6 +628,34 @@ struct se_dev_attrib {
628 struct config_group da_group; 628 struct config_group da_group;
629}; 629};
630 630
631struct se_port_stat_grps {
632 struct config_group stat_group;
633 struct config_group scsi_port_group;
634 struct config_group scsi_tgt_port_group;
635 struct config_group scsi_transport_group;
636};
637
638struct se_lun {
639#define SE_LUN_LINK_MAGIC 0xffff7771
640 u32 lun_link_magic;
641 /* See transport_lun_status_table */
642 enum transport_lun_status_table lun_status;
643 u32 lun_access;
644 u32 lun_flags;
645 u32 unpacked_lun;
646 atomic_t lun_acl_count;
647 spinlock_t lun_acl_lock;
648 spinlock_t lun_sep_lock;
649 struct completion lun_shutdown_comp;
650 struct list_head lun_acl_list;
651 struct se_device *lun_se_dev;
652 struct se_port *lun_sep;
653 struct config_group lun_group;
654 struct se_port_stat_grps port_stat_grps;
655 struct completion lun_ref_comp;
656 struct percpu_ref lun_ref;
657};
658
631struct se_dev_stat_grps { 659struct se_dev_stat_grps {
632 struct config_group stat_group; 660 struct config_group stat_group;
633 struct config_group scsi_dev_group; 661 struct config_group scsi_dev_group;
@@ -656,11 +684,10 @@ struct se_device {
656 /* Pointer to transport specific device structure */ 684 /* Pointer to transport specific device structure */
657 u32 dev_index; 685 u32 dev_index;
658 u64 creation_time; 686 u64 creation_time;
659 u32 num_resets; 687 atomic_long_t num_resets;
660 u64 num_cmds; 688 atomic_long_t num_cmds;
661 u64 read_bytes; 689 atomic_long_t read_bytes;
662 u64 write_bytes; 690 atomic_long_t write_bytes;
663 spinlock_t stats_lock;
664 /* Active commands on this virtual SE device */ 691 /* Active commands on this virtual SE device */
665 atomic_t simple_cmds; 692 atomic_t simple_cmds;
666 atomic_t dev_ordered_id; 693 atomic_t dev_ordered_id;
@@ -711,6 +738,7 @@ struct se_device {
711 struct se_subsystem_api *transport; 738 struct se_subsystem_api *transport;
712 /* Linked list for struct se_hba struct se_device list */ 739 /* Linked list for struct se_hba struct se_device list */
713 struct list_head dev_list; 740 struct list_head dev_list;
741 struct se_lun xcopy_lun;
714}; 742};
715 743
716struct se_hba { 744struct se_hba {
@@ -730,34 +758,6 @@ struct se_hba {
730 struct se_subsystem_api *transport; 758 struct se_subsystem_api *transport;
731}; 759};
732 760
733struct se_port_stat_grps {
734 struct config_group stat_group;
735 struct config_group scsi_port_group;
736 struct config_group scsi_tgt_port_group;
737 struct config_group scsi_transport_group;
738};
739
740struct se_lun {
741#define SE_LUN_LINK_MAGIC 0xffff7771
742 u32 lun_link_magic;
743 /* See transport_lun_status_table */
744 enum transport_lun_status_table lun_status;
745 u32 lun_access;
746 u32 lun_flags;
747 u32 unpacked_lun;
748 atomic_t lun_acl_count;
749 spinlock_t lun_acl_lock;
750 spinlock_t lun_cmd_lock;
751 spinlock_t lun_sep_lock;
752 struct completion lun_shutdown_comp;
753 struct list_head lun_cmd_list;
754 struct list_head lun_acl_list;
755 struct se_device *lun_se_dev;
756 struct se_port *lun_sep;
757 struct config_group lun_group;
758 struct se_port_stat_grps port_stat_grps;
759};
760
761struct scsi_port_stats { 761struct scsi_port_stats {
762 u64 cmd_pdus; 762 u64 cmd_pdus;
763 u64 tx_data_octets; 763 u64 tx_data_octets;
diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h
index 713c5004f4ae..e0801386e4dc 100644
--- a/include/target/target_core_configfs.h
+++ b/include/target/target_core_configfs.h
@@ -54,4 +54,3 @@ struct target_fabric_configfs {
54 struct target_fabric_configfs_template tf_cit_tmpl; 54 struct target_fabric_configfs_template tf_cit_tmpl;
55}; 55};
56 56
57#define TF_CIT_TMPL(tf) (&(tf)->tf_cit_tmpl)
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 882b650e32be..4cf4fda404a3 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -137,6 +137,8 @@ void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
137void __target_execute_cmd(struct se_cmd *); 137void __target_execute_cmd(struct se_cmd *);
138int transport_lookup_tmr_lun(struct se_cmd *, u32); 138int transport_lookup_tmr_lun(struct se_cmd *, u32);
139 139
140struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
141 unsigned char *);
140struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *, 142struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
141 unsigned char *); 143 unsigned char *);
142void core_tpg_clear_object_luns(struct se_portal_group *); 144void core_tpg_clear_object_luns(struct se_portal_group *);
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index f18b3b76e01e..4832d75dcbae 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -162,12 +162,14 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
162 { EXTENT_FLAG_LOGGING, "LOGGING" }, \ 162 { EXTENT_FLAG_LOGGING, "LOGGING" }, \
163 { EXTENT_FLAG_FILLING, "FILLING" }) 163 { EXTENT_FLAG_FILLING, "FILLING" })
164 164
165TRACE_EVENT(btrfs_get_extent, 165TRACE_EVENT_CONDITION(btrfs_get_extent,
166 166
167 TP_PROTO(struct btrfs_root *root, struct extent_map *map), 167 TP_PROTO(struct btrfs_root *root, struct extent_map *map),
168 168
169 TP_ARGS(root, map), 169 TP_ARGS(root, map),
170 170
171 TP_CONDITION(map),
172
171 TP_STRUCT__entry( 173 TP_STRUCT__entry(
172 __field( u64, root_objectid ) 174 __field( u64, root_objectid )
173 __field( u64, start ) 175 __field( u64, start )
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 6b852f60f8ae..5c38606613d8 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -379,7 +379,8 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
379 __data_size += (len) * sizeof(type); 379 __data_size += (len) * sizeof(type);
380 380
381#undef __string 381#undef __string
382#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) 382#define __string(item, src) __dynamic_array(char, item, \
383 strlen((src) ? (const char *)(src) : "(null)") + 1)
383 384
384#undef DECLARE_EVENT_CLASS 385#undef DECLARE_EVENT_CLASS
385#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 386#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -508,7 +509,7 @@ static inline notrace int ftrace_get_offsets_##call( \
508 509
509#undef __assign_str 510#undef __assign_str
510#define __assign_str(dst, src) \ 511#define __assign_str(dst, src) \
511 strcpy(__get_str(dst), src); 512 strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
512 513
513#undef TP_fast_assign 514#undef TP_fast_assign
514#define TP_fast_assign(args...) args 515#define TP_fast_assign(args...) args
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 46d41e8b0dcc..2f3f7ea8c77b 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -981,6 +981,8 @@ struct drm_radeon_cs {
981#define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16 981#define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16
982/* query if CP DMA is supported on the compute ring */ 982/* query if CP DMA is supported on the compute ring */
983#define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17 983#define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17
984/* CIK macrotile mode array */
985#define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18
984 986
985 987
986struct drm_radeon_info { 988struct drm_radeon_info {
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index db0b825b4810..44b05a09f193 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -68,6 +68,9 @@
68#define AUDIT_MAKE_EQUIV 1015 /* Append to watched tree */ 68#define AUDIT_MAKE_EQUIV 1015 /* Append to watched tree */
69#define AUDIT_TTY_GET 1016 /* Get TTY auditing status */ 69#define AUDIT_TTY_GET 1016 /* Get TTY auditing status */
70#define AUDIT_TTY_SET 1017 /* Set TTY auditing status */ 70#define AUDIT_TTY_SET 1017 /* Set TTY auditing status */
71#define AUDIT_SET_FEATURE 1018 /* Turn an audit feature on or off */
72#define AUDIT_GET_FEATURE 1019 /* Get which features are enabled */
73#define AUDIT_FEATURE_CHANGE 1020 /* audit log listing feature changes */
71 74
72#define AUDIT_FIRST_USER_MSG 1100 /* Userspace messages mostly uninteresting to kernel */ 75#define AUDIT_FIRST_USER_MSG 1100 /* Userspace messages mostly uninteresting to kernel */
73#define AUDIT_USER_AVC 1107 /* We filter this differently */ 76#define AUDIT_USER_AVC 1107 /* We filter this differently */
@@ -357,6 +360,12 @@ enum {
357#define AUDIT_PERM_READ 4 360#define AUDIT_PERM_READ 4
358#define AUDIT_PERM_ATTR 8 361#define AUDIT_PERM_ATTR 8
359 362
363/* MAX_AUDIT_MESSAGE_LENGTH is set in audit:lib/libaudit.h as:
364 * 8970 // PATH_MAX*2+CONTEXT_SIZE*2+11+256+1
365 * max header+body+tailer: 44 + 29 + 32 + 262 + 7 + pad
366 */
367#define AUDIT_MESSAGE_TEXT_MAX 8560
368
360struct audit_status { 369struct audit_status {
361 __u32 mask; /* Bit mask for valid entries */ 370 __u32 mask; /* Bit mask for valid entries */
362 __u32 enabled; /* 1 = enabled, 0 = disabled */ 371 __u32 enabled; /* 1 = enabled, 0 = disabled */
@@ -368,11 +377,28 @@ struct audit_status {
368 __u32 backlog; /* messages waiting in queue */ 377 __u32 backlog; /* messages waiting in queue */
369}; 378};
370 379
380struct audit_features {
381#define AUDIT_FEATURE_VERSION 1
382 __u32 vers;
383 __u32 mask; /* which bits we are dealing with */
384 __u32 features; /* which feature to enable/disable */
385 __u32 lock; /* which features to lock */
386};
387
388#define AUDIT_FEATURE_ONLY_UNSET_LOGINUID 0
389#define AUDIT_FEATURE_LOGINUID_IMMUTABLE 1
390#define AUDIT_LAST_FEATURE AUDIT_FEATURE_LOGINUID_IMMUTABLE
391
392#define audit_feature_valid(x) ((x) >= 0 && (x) <= AUDIT_LAST_FEATURE)
393#define AUDIT_FEATURE_TO_MASK(x) (1 << ((x) & 31)) /* mask for __u32 */
394
371struct audit_tty_status { 395struct audit_tty_status {
372 __u32 enabled; /* 1 = enabled, 0 = disabled */ 396 __u32 enabled; /* 1 = enabled, 0 = disabled */
373 __u32 log_passwd; /* 1 = enabled, 0 = disabled */ 397 __u32 log_passwd; /* 1 = enabled, 0 = disabled */
374}; 398};
375 399
400#define AUDIT_UID_UNSET (unsigned int)-1
401
376/* audit_rule_data supports filter rules with both integer and string 402/* audit_rule_data supports filter rules with both integer and string
377 * fields. It corresponds with AUDIT_ADD_RULE, AUDIT_DEL_RULE and 403 * fields. It corresponds with AUDIT_ADD_RULE, AUDIT_DEL_RULE and
378 * AUDIT_LIST_RULES requests. 404 * AUDIT_LIST_RULES requests.
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
index 2c267bcbb85c..bc81fb2e1f0e 100644
--- a/include/uapi/linux/eventpoll.h
+++ b/include/uapi/linux/eventpoll.h
@@ -61,5 +61,16 @@ struct epoll_event {
61 __u64 data; 61 __u64 data;
62} EPOLL_PACKED; 62} EPOLL_PACKED;
63 63
64 64#ifdef CONFIG_PM_SLEEP
65static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
66{
67 if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
68 epev->events &= ~EPOLLWAKEUP;
69}
70#else
71static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
72{
73 epev->events &= ~EPOLLWAKEUP;
74}
75#endif
65#endif /* _UAPI_LINUX_EVENTPOLL_H */ 76#endif /* _UAPI_LINUX_EVENTPOLL_H */
diff --git a/include/uapi/linux/genetlink.h b/include/uapi/linux/genetlink.h
index c880a417d8a9..c3363ba1ae05 100644
--- a/include/uapi/linux/genetlink.h
+++ b/include/uapi/linux/genetlink.h
@@ -27,6 +27,8 @@ struct genlmsghdr {
27 */ 27 */
28#define GENL_ID_GENERATE 0 28#define GENL_ID_GENERATE 0
29#define GENL_ID_CTRL NLMSG_MIN_TYPE 29#define GENL_ID_CTRL NLMSG_MIN_TYPE
30#define GENL_ID_VFS_DQUOT (NLMSG_MIN_TYPE + 1)
31#define GENL_ID_PMCRAID (NLMSG_MIN_TYPE + 2)
30 32
31/************************************************************************** 33/**************************************************************************
32 * Controller 34 * Controller
diff --git a/include/uapi/linux/hash_info.h b/include/uapi/linux/hash_info.h
new file mode 100644
index 000000000000..ca18c45f8304
--- /dev/null
+++ b/include/uapi/linux/hash_info.h
@@ -0,0 +1,37 @@
1/*
2 * Hash Info: Hash algorithms information
3 *
4 * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#ifndef _UAPI_LINUX_HASH_INFO_H
14#define _UAPI_LINUX_HASH_INFO_H
15
16enum hash_algo {
17 HASH_ALGO_MD4,
18 HASH_ALGO_MD5,
19 HASH_ALGO_SHA1,
20 HASH_ALGO_RIPE_MD_160,
21 HASH_ALGO_SHA256,
22 HASH_ALGO_SHA384,
23 HASH_ALGO_SHA512,
24 HASH_ALGO_SHA224,
25 HASH_ALGO_RIPE_MD_128,
26 HASH_ALGO_RIPE_MD_256,
27 HASH_ALGO_RIPE_MD_320,
28 HASH_ALGO_WP_256,
29 HASH_ALGO_WP_384,
30 HASH_ALGO_WP_512,
31 HASH_ALGO_TGR_128,
32 HASH_ALGO_TGR_160,
33 HASH_ALGO_TGR_192,
34 HASH_ALGO__LAST
35};
36
37#endif /* _UAPI_LINUX_HASH_INFO_H */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index b78566f59aba..6db460121f84 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -488,7 +488,9 @@ enum {
488 IFLA_HSR_UNSPEC, 488 IFLA_HSR_UNSPEC,
489 IFLA_HSR_SLAVE1, 489 IFLA_HSR_SLAVE1,
490 IFLA_HSR_SLAVE2, 490 IFLA_HSR_SLAVE2,
491 IFLA_HSR_MULTICAST_SPEC, 491 IFLA_HSR_MULTICAST_SPEC, /* Last byte of supervision addr */
492 IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */
493 IFLA_HSR_SEQ_NR,
492 __IFLA_HSR_MAX, 494 __IFLA_HSR_MAX,
493}; 495};
494 496
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index a3726275876d..ecc88592ecbe 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -719,6 +719,8 @@ struct input_keymap_entry {
719#define BTN_DPAD_LEFT 0x222 719#define BTN_DPAD_LEFT 0x222
720#define BTN_DPAD_RIGHT 0x223 720#define BTN_DPAD_RIGHT 0x223
721 721
722#define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */
723
722#define BTN_TRIGGER_HAPPY 0x2c0 724#define BTN_TRIGGER_HAPPY 0x2c0
723#define BTN_TRIGGER_HAPPY1 0x2c0 725#define BTN_TRIGGER_HAPPY1 0x2c0
724#define BTN_TRIGGER_HAPPY2 0x2c1 726#define BTN_TRIGGER_HAPPY2 0x2c1
@@ -856,6 +858,7 @@ struct input_keymap_entry {
856#define SW_FRONT_PROXIMITY 0x0b /* set = front proximity sensor active */ 858#define SW_FRONT_PROXIMITY 0x0b /* set = front proximity sensor active */
857#define SW_ROTATE_LOCK 0x0c /* set = rotate locked/disabled */ 859#define SW_ROTATE_LOCK 0x0c /* set = rotate locked/disabled */
858#define SW_LINEIN_INSERT 0x0d /* set = inserted */ 860#define SW_LINEIN_INSERT 0x0d /* set = inserted */
861#define SW_MUTE_DEVICE 0x0e /* set = device disabled */
859#define SW_MAX 0x0f 862#define SW_MAX 0x0f
860#define SW_CNT (SW_MAX+1) 863#define SW_CNT (SW_MAX+1)
861 864
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
index c9b7f4faf97a..840cb990abe2 100644
--- a/include/uapi/linux/keyctl.h
+++ b/include/uapi/linux/keyctl.h
@@ -56,5 +56,6 @@
56#define KEYCTL_REJECT 19 /* reject a partially constructed key */ 56#define KEYCTL_REJECT 19 /* reject a partially constructed key */
57#define KEYCTL_INSTANTIATE_IOV 20 /* instantiate a partially constructed key */ 57#define KEYCTL_INSTANTIATE_IOV 20 /* instantiate a partially constructed key */
58#define KEYCTL_INVALIDATE 21 /* invalidate a key */ 58#define KEYCTL_INVALIDATE 21 /* invalidate a key */
59#define KEYCTL_GET_PERSISTENT 22 /* get a user's persistent keyring */
59 60
60#endif /* _LINUX_KEYCTL_H */ 61#endif /* _LINUX_KEYCTL_H */
diff --git a/include/uapi/linux/mic_common.h b/include/uapi/linux/mic_common.h
index 17e7d95e4f53..6eb40244e019 100644
--- a/include/uapi/linux/mic_common.h
+++ b/include/uapi/linux/mic_common.h
@@ -23,12 +23,7 @@
23 23
24#include <linux/virtio_ring.h> 24#include <linux/virtio_ring.h>
25 25
26#ifndef __KERNEL__ 26#define __mic_align(a, x) (((a) + (x) - 1) & ~((x) - 1))
27#define ALIGN(a, x) (((a) + (x) - 1) & ~((x) - 1))
28#define __aligned(x) __attribute__ ((aligned(x)))
29#endif
30
31#define mic_aligned_size(x) ALIGN(sizeof(x), 8)
32 27
33/** 28/**
34 * struct mic_device_desc: Virtio device information shared between the 29 * struct mic_device_desc: Virtio device information shared between the
@@ -48,8 +43,8 @@ struct mic_device_desc {
48 __u8 feature_len; 43 __u8 feature_len;
49 __u8 config_len; 44 __u8 config_len;
50 __u8 status; 45 __u8 status;
51 __u64 config[0]; 46 __le64 config[0];
52} __aligned(8); 47} __attribute__ ((aligned(8)));
53 48
54/** 49/**
55 * struct mic_device_ctrl: Per virtio device information in the device page 50 * struct mic_device_ctrl: Per virtio device information in the device page
@@ -66,7 +61,7 @@ struct mic_device_desc {
66 * @h2c_vdev_db: The doorbell number to be used by host. Set by guest. 61 * @h2c_vdev_db: The doorbell number to be used by host. Set by guest.
67 */ 62 */
68struct mic_device_ctrl { 63struct mic_device_ctrl {
69 __u64 vdev; 64 __le64 vdev;
70 __u8 config_change; 65 __u8 config_change;
71 __u8 vdev_reset; 66 __u8 vdev_reset;
72 __u8 guest_ack; 67 __u8 guest_ack;
@@ -74,7 +69,7 @@ struct mic_device_ctrl {
74 __u8 used_address_updated; 69 __u8 used_address_updated;
75 __s8 c2h_vdev_db; 70 __s8 c2h_vdev_db;
76 __s8 h2c_vdev_db; 71 __s8 h2c_vdev_db;
77} __aligned(8); 72} __attribute__ ((aligned(8)));
78 73
79/** 74/**
80 * struct mic_bootparam: Virtio device independent information in device page 75 * struct mic_bootparam: Virtio device independent information in device page
@@ -87,13 +82,13 @@ struct mic_device_ctrl {
87 * @shutdown_card: Set to 1 by the host when a card shutdown is initiated 82 * @shutdown_card: Set to 1 by the host when a card shutdown is initiated
88 */ 83 */
89struct mic_bootparam { 84struct mic_bootparam {
90 __u32 magic; 85 __le32 magic;
91 __s8 c2h_shutdown_db; 86 __s8 c2h_shutdown_db;
92 __s8 h2c_shutdown_db; 87 __s8 h2c_shutdown_db;
93 __s8 h2c_config_db; 88 __s8 h2c_config_db;
94 __u8 shutdown_status; 89 __u8 shutdown_status;
95 __u8 shutdown_card; 90 __u8 shutdown_card;
96} __aligned(8); 91} __attribute__ ((aligned(8)));
97 92
98/** 93/**
99 * struct mic_device_page: High level representation of the device page 94 * struct mic_device_page: High level representation of the device page
@@ -116,10 +111,10 @@ struct mic_device_page {
116 * @num: The number of entries in the virtio_ring 111 * @num: The number of entries in the virtio_ring
117 */ 112 */
118struct mic_vqconfig { 113struct mic_vqconfig {
119 __u64 address; 114 __le64 address;
120 __u64 used_address; 115 __le64 used_address;
121 __u16 num; 116 __le16 num;
122} __aligned(8); 117} __attribute__ ((aligned(8)));
123 118
124/* 119/*
125 * The alignment to use between consumer and producer parts of vring. 120 * The alignment to use between consumer and producer parts of vring.
@@ -154,7 +149,7 @@ struct mic_vqconfig {
154 */ 149 */
155struct _mic_vring_info { 150struct _mic_vring_info {
156 __u16 avail_idx; 151 __u16 avail_idx;
157 int magic; 152 __le32 magic;
158}; 153};
159 154
160/** 155/**
@@ -173,15 +168,13 @@ struct mic_vring {
173 int len; 168 int len;
174}; 169};
175 170
176#define mic_aligned_desc_size(d) ALIGN(mic_desc_size(d), 8) 171#define mic_aligned_desc_size(d) __mic_align(mic_desc_size(d), 8)
177 172
178#ifndef INTEL_MIC_CARD 173#ifndef INTEL_MIC_CARD
179static inline unsigned mic_desc_size(const struct mic_device_desc *desc) 174static inline unsigned mic_desc_size(const struct mic_device_desc *desc)
180{ 175{
181 return mic_aligned_size(*desc) 176 return sizeof(*desc) + desc->num_vq * sizeof(struct mic_vqconfig)
182 + desc->num_vq * mic_aligned_size(struct mic_vqconfig) 177 + desc->feature_len * 2 + desc->config_len;
183 + desc->feature_len * 2
184 + desc->config_len;
185} 178}
186 179
187static inline struct mic_vqconfig * 180static inline struct mic_vqconfig *
@@ -201,8 +194,7 @@ static inline __u8 *mic_vq_configspace(const struct mic_device_desc *desc)
201} 194}
202static inline unsigned mic_total_desc_size(struct mic_device_desc *desc) 195static inline unsigned mic_total_desc_size(struct mic_device_desc *desc)
203{ 196{
204 return mic_aligned_desc_size(desc) + 197 return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
205 mic_aligned_size(struct mic_device_ctrl);
206} 198}
207#endif 199#endif
208 200
diff --git a/include/uapi/linux/netlink_diag.h b/include/uapi/linux/netlink_diag.h
index 4e31db4eea41..f2159d30d1f5 100644
--- a/include/uapi/linux/netlink_diag.h
+++ b/include/uapi/linux/netlink_diag.h
@@ -33,6 +33,7 @@ struct netlink_diag_ring {
33}; 33};
34 34
35enum { 35enum {
36 /* NETLINK_DIAG_NONE, standard nl API requires this attribute! */
36 NETLINK_DIAG_MEMINFO, 37 NETLINK_DIAG_MEMINFO,
37 NETLINK_DIAG_GROUPS, 38 NETLINK_DIAG_GROUPS,
38 NETLINK_DIAG_RX_RING, 39 NETLINK_DIAG_RX_RING,
diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h
index b2cc0cd9c4d9..d08c63f3dd6f 100644
--- a/include/uapi/linux/packet_diag.h
+++ b/include/uapi/linux/packet_diag.h
@@ -29,6 +29,7 @@ struct packet_diag_msg {
29}; 29};
30 30
31enum { 31enum {
32 /* PACKET_DIAG_NONE, standard nl API requires this attribute! */
32 PACKET_DIAG_INFO, 33 PACKET_DIAG_INFO,
33 PACKET_DIAG_MCLIST, 34 PACKET_DIAG_MCLIST,
34 PACKET_DIAG_RX_RING, 35 PACKET_DIAG_RX_RING,
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 0890556f779e..4a98e85438a7 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -13,10 +13,10 @@
13 * PCI to PCI Bridge Specification 13 * PCI to PCI Bridge Specification
14 * PCI System Design Guide 14 * PCI System Design Guide
15 * 15 *
16 * For hypertransport information, please consult the following manuals 16 * For HyperTransport information, please consult the following manuals
17 * from http://www.hypertransport.org 17 * from http://www.hypertransport.org
18 * 18 *
19 * The Hypertransport I/O Link Specification 19 * The HyperTransport I/O Link Specification
20 */ 20 */
21 21
22#ifndef LINUX_PCI_REGS_H 22#ifndef LINUX_PCI_REGS_H
@@ -37,7 +37,7 @@
37#define PCI_COMMAND_INVALIDATE 0x10 /* Use memory write and invalidate */ 37#define PCI_COMMAND_INVALIDATE 0x10 /* Use memory write and invalidate */
38#define PCI_COMMAND_VGA_PALETTE 0x20 /* Enable palette snooping */ 38#define PCI_COMMAND_VGA_PALETTE 0x20 /* Enable palette snooping */
39#define PCI_COMMAND_PARITY 0x40 /* Enable parity checking */ 39#define PCI_COMMAND_PARITY 0x40 /* Enable parity checking */
40#define PCI_COMMAND_WAIT 0x80 /* Enable address/data stepping */ 40#define PCI_COMMAND_WAIT 0x80 /* Enable address/data stepping */
41#define PCI_COMMAND_SERR 0x100 /* Enable SERR */ 41#define PCI_COMMAND_SERR 0x100 /* Enable SERR */
42#define PCI_COMMAND_FAST_BACK 0x200 /* Enable back-to-back writes */ 42#define PCI_COMMAND_FAST_BACK 0x200 /* Enable back-to-back writes */
43#define PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */ 43#define PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */
@@ -45,7 +45,7 @@
45#define PCI_STATUS 0x06 /* 16 bits */ 45#define PCI_STATUS 0x06 /* 16 bits */
46#define PCI_STATUS_INTERRUPT 0x08 /* Interrupt status */ 46#define PCI_STATUS_INTERRUPT 0x08 /* Interrupt status */
47#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */ 47#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */
48#define PCI_STATUS_66MHZ 0x20 /* Support 66 Mhz PCI 2.1 bus */ 48#define PCI_STATUS_66MHZ 0x20 /* Support 66 MHz PCI 2.1 bus */
49#define PCI_STATUS_UDF 0x40 /* Support User Definable Features [obsolete] */ 49#define PCI_STATUS_UDF 0x40 /* Support User Definable Features [obsolete] */
50#define PCI_STATUS_FAST_BACK 0x80 /* Accept fast-back to back */ 50#define PCI_STATUS_FAST_BACK 0x80 /* Accept fast-back to back */
51#define PCI_STATUS_PARITY 0x100 /* Detected parity error */ 51#define PCI_STATUS_PARITY 0x100 /* Detected parity error */
@@ -205,14 +205,14 @@
205#define PCI_CAP_ID_CHSWP 0x06 /* CompactPCI HotSwap */ 205#define PCI_CAP_ID_CHSWP 0x06 /* CompactPCI HotSwap */
206#define PCI_CAP_ID_PCIX 0x07 /* PCI-X */ 206#define PCI_CAP_ID_PCIX 0x07 /* PCI-X */
207#define PCI_CAP_ID_HT 0x08 /* HyperTransport */ 207#define PCI_CAP_ID_HT 0x08 /* HyperTransport */
208#define PCI_CAP_ID_VNDR 0x09 /* Vendor specific */ 208#define PCI_CAP_ID_VNDR 0x09 /* Vendor-Specific */
209#define PCI_CAP_ID_DBG 0x0A /* Debug port */ 209#define PCI_CAP_ID_DBG 0x0A /* Debug port */
210#define PCI_CAP_ID_CCRC 0x0B /* CompactPCI Central Resource Control */ 210#define PCI_CAP_ID_CCRC 0x0B /* CompactPCI Central Resource Control */
211#define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */ 211#define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */
212#define PCI_CAP_ID_SSVID 0x0D /* Bridge subsystem vendor/device ID */ 212#define PCI_CAP_ID_SSVID 0x0D /* Bridge subsystem vendor/device ID */
213#define PCI_CAP_ID_AGP3 0x0E /* AGP Target PCI-PCI bridge */ 213#define PCI_CAP_ID_AGP3 0x0E /* AGP Target PCI-PCI bridge */
214#define PCI_CAP_ID_SECDEV 0x0F /* Secure Device */ 214#define PCI_CAP_ID_SECDEV 0x0F /* Secure Device */
215#define PCI_CAP_ID_EXP 0x10 /* PCI Express */ 215#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
216#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */ 216#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */
217#define PCI_CAP_ID_SATA 0x12 /* SATA Data/Index Conf. */ 217#define PCI_CAP_ID_SATA 0x12 /* SATA Data/Index Conf. */
218#define PCI_CAP_ID_AF 0x13 /* PCI Advanced Features */ 218#define PCI_CAP_ID_AF 0x13 /* PCI Advanced Features */
@@ -268,8 +268,8 @@
268#define PCI_AGP_COMMAND_RQ_MASK 0xff000000 /* Master: Maximum number of requests */ 268#define PCI_AGP_COMMAND_RQ_MASK 0xff000000 /* Master: Maximum number of requests */
269#define PCI_AGP_COMMAND_SBA 0x0200 /* Sideband addressing enabled */ 269#define PCI_AGP_COMMAND_SBA 0x0200 /* Sideband addressing enabled */
270#define PCI_AGP_COMMAND_AGP 0x0100 /* Allow processing of AGP transactions */ 270#define PCI_AGP_COMMAND_AGP 0x0100 /* Allow processing of AGP transactions */
271#define PCI_AGP_COMMAND_64BIT 0x0020 /* Allow processing of 64-bit addresses */ 271#define PCI_AGP_COMMAND_64BIT 0x0020 /* Allow processing of 64-bit addresses */
272#define PCI_AGP_COMMAND_FW 0x0010 /* Force FW transfers */ 272#define PCI_AGP_COMMAND_FW 0x0010 /* Force FW transfers */
273#define PCI_AGP_COMMAND_RATE4 0x0004 /* Use 4x rate */ 273#define PCI_AGP_COMMAND_RATE4 0x0004 /* Use 4x rate */
274#define PCI_AGP_COMMAND_RATE2 0x0002 /* Use 2x rate */ 274#define PCI_AGP_COMMAND_RATE2 0x0002 /* Use 2x rate */
275#define PCI_AGP_COMMAND_RATE1 0x0001 /* Use 1x rate */ 275#define PCI_AGP_COMMAND_RATE1 0x0001 /* Use 1x rate */
@@ -321,7 +321,7 @@
321#define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */ 321#define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */
322#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */ 322#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
323 323
324/* MSI-X entry's format */ 324/* MSI-X Table entry format */
325#define PCI_MSIX_ENTRY_SIZE 16 325#define PCI_MSIX_ENTRY_SIZE 16
326#define PCI_MSIX_ENTRY_LOWER_ADDR 0 326#define PCI_MSIX_ENTRY_LOWER_ADDR 0
327#define PCI_MSIX_ENTRY_UPPER_ADDR 4 327#define PCI_MSIX_ENTRY_UPPER_ADDR 4
@@ -372,7 +372,7 @@
372#define PCI_X_CMD_SPLIT_16 0x0060 /* Max 16 */ 372#define PCI_X_CMD_SPLIT_16 0x0060 /* Max 16 */
373#define PCI_X_CMD_SPLIT_32 0x0070 /* Max 32 */ 373#define PCI_X_CMD_SPLIT_32 0x0070 /* Max 32 */
374#define PCI_X_CMD_MAX_SPLIT 0x0070 /* Max Outstanding Split Transactions */ 374#define PCI_X_CMD_MAX_SPLIT 0x0070 /* Max Outstanding Split Transactions */
375#define PCI_X_CMD_VERSION(x) (((x) >> 12) & 3) /* Version */ 375#define PCI_X_CMD_VERSION(x) (((x) >> 12) & 3) /* Version */
376#define PCI_X_STATUS 4 /* PCI-X capabilities */ 376#define PCI_X_STATUS 4 /* PCI-X capabilities */
377#define PCI_X_STATUS_DEVFN 0x000000ff /* A copy of devfn */ 377#define PCI_X_STATUS_DEVFN 0x000000ff /* A copy of devfn */
378#define PCI_X_STATUS_BUS 0x0000ff00 /* A copy of bus nr */ 378#define PCI_X_STATUS_BUS 0x0000ff00 /* A copy of bus nr */
@@ -407,8 +407,8 @@
407 407
408/* PCI Bridge Subsystem ID registers */ 408/* PCI Bridge Subsystem ID registers */
409 409
410#define PCI_SSVID_VENDOR_ID 4 /* PCI-Bridge subsystem vendor id register */ 410#define PCI_SSVID_VENDOR_ID 4 /* PCI Bridge subsystem vendor ID */
411#define PCI_SSVID_DEVICE_ID 6 /* PCI-Bridge subsystem device id register */ 411#define PCI_SSVID_DEVICE_ID 6 /* PCI Bridge subsystem device ID */
412 412
413/* PCI Express capability registers */ 413/* PCI Express capability registers */
414 414
@@ -484,12 +484,12 @@
484#define PCI_EXP_LNKCTL_CLKREQ_EN 0x0100 /* Enable clkreq */ 484#define PCI_EXP_LNKCTL_CLKREQ_EN 0x0100 /* Enable clkreq */
485#define PCI_EXP_LNKCTL_HAWD 0x0200 /* Hardware Autonomous Width Disable */ 485#define PCI_EXP_LNKCTL_HAWD 0x0200 /* Hardware Autonomous Width Disable */
486#define PCI_EXP_LNKCTL_LBMIE 0x0400 /* Link Bandwidth Management Interrupt Enable */ 486#define PCI_EXP_LNKCTL_LBMIE 0x0400 /* Link Bandwidth Management Interrupt Enable */
487#define PCI_EXP_LNKCTL_LABIE 0x0800 /* Lnk Autonomous Bandwidth Interrupt Enable */ 487#define PCI_EXP_LNKCTL_LABIE 0x0800 /* Link Autonomous Bandwidth Interrupt Enable */
488#define PCI_EXP_LNKSTA 18 /* Link Status */ 488#define PCI_EXP_LNKSTA 18 /* Link Status */
489#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ 489#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */
490#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 /* Current Link Speed 2.5GT/s */ 490#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 /* Current Link Speed 2.5GT/s */
491#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 /* Current Link Speed 5.0GT/s */ 491#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 /* Current Link Speed 5.0GT/s */
492#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Nogotiated Link Width */ 492#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */
493#define PCI_EXP_LNKSTA_NLW_SHIFT 4 /* start of NLW mask in link status */ 493#define PCI_EXP_LNKSTA_NLW_SHIFT 4 /* start of NLW mask in link status */
494#define PCI_EXP_LNKSTA_LT 0x0800 /* Link Training */ 494#define PCI_EXP_LNKSTA_LT 0x0800 /* Link Training */
495#define PCI_EXP_LNKSTA_SLC 0x1000 /* Slot Clock Configuration */ 495#define PCI_EXP_LNKSTA_SLC 0x1000 /* Slot Clock Configuration */
@@ -593,7 +593,7 @@
593#define PCI_EXT_CAP_ID_MFVC 0x08 /* Multi-Function VC Capability */ 593#define PCI_EXT_CAP_ID_MFVC 0x08 /* Multi-Function VC Capability */
594#define PCI_EXT_CAP_ID_VC9 0x09 /* same as _VC */ 594#define PCI_EXT_CAP_ID_VC9 0x09 /* same as _VC */
595#define PCI_EXT_CAP_ID_RCRB 0x0A /* Root Complex RB? */ 595#define PCI_EXT_CAP_ID_RCRB 0x0A /* Root Complex RB? */
596#define PCI_EXT_CAP_ID_VNDR 0x0B /* Vendor Specific */ 596#define PCI_EXT_CAP_ID_VNDR 0x0B /* Vendor-Specific */
597#define PCI_EXT_CAP_ID_CAC 0x0C /* Config Access - obsolete */ 597#define PCI_EXT_CAP_ID_CAC 0x0C /* Config Access - obsolete */
598#define PCI_EXT_CAP_ID_ACS 0x0D /* Access Control Services */ 598#define PCI_EXT_CAP_ID_ACS 0x0D /* Access Control Services */
599#define PCI_EXT_CAP_ID_ARI 0x0E /* Alternate Routing ID */ 599#define PCI_EXT_CAP_ID_ARI 0x0E /* Alternate Routing ID */
@@ -602,12 +602,12 @@
602#define PCI_EXT_CAP_ID_MRIOV 0x11 /* Multi Root I/O Virtualization */ 602#define PCI_EXT_CAP_ID_MRIOV 0x11 /* Multi Root I/O Virtualization */
603#define PCI_EXT_CAP_ID_MCAST 0x12 /* Multicast */ 603#define PCI_EXT_CAP_ID_MCAST 0x12 /* Multicast */
604#define PCI_EXT_CAP_ID_PRI 0x13 /* Page Request Interface */ 604#define PCI_EXT_CAP_ID_PRI 0x13 /* Page Request Interface */
605#define PCI_EXT_CAP_ID_AMD_XXX 0x14 /* reserved for AMD */ 605#define PCI_EXT_CAP_ID_AMD_XXX 0x14 /* Reserved for AMD */
606#define PCI_EXT_CAP_ID_REBAR 0x15 /* resizable BAR */ 606#define PCI_EXT_CAP_ID_REBAR 0x15 /* Resizable BAR */
607#define PCI_EXT_CAP_ID_DPA 0x16 /* dynamic power alloc */ 607#define PCI_EXT_CAP_ID_DPA 0x16 /* Dynamic Power Allocation */
608#define PCI_EXT_CAP_ID_TPH 0x17 /* TPH request */ 608#define PCI_EXT_CAP_ID_TPH 0x17 /* TPH Requester */
609#define PCI_EXT_CAP_ID_LTR 0x18 /* latency tolerance reporting */ 609#define PCI_EXT_CAP_ID_LTR 0x18 /* Latency Tolerance Reporting */
610#define PCI_EXT_CAP_ID_SECPCI 0x19 /* Secondary PCIe */ 610#define PCI_EXT_CAP_ID_SECPCI 0x19 /* Secondary PCIe Capability */
611#define PCI_EXT_CAP_ID_PMUX 0x1A /* Protocol Multiplexing */ 611#define PCI_EXT_CAP_ID_PMUX 0x1A /* Protocol Multiplexing */
612#define PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */ 612#define PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */
613#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PASID 613#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PASID
@@ -667,9 +667,9 @@
667#define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */ 667#define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */
668/* Multi ERR_COR Received */ 668/* Multi ERR_COR Received */
669#define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002 669#define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002
670/* ERR_FATAL/NONFATAL Recevied */ 670/* ERR_FATAL/NONFATAL Received */
671#define PCI_ERR_ROOT_UNCOR_RCV 0x00000004 671#define PCI_ERR_ROOT_UNCOR_RCV 0x00000004
672/* Multi ERR_FATAL/NONFATAL Recevied */ 672/* Multi ERR_FATAL/NONFATAL Received */
673#define PCI_ERR_ROOT_MULTI_UNCOR_RCV 0x00000008 673#define PCI_ERR_ROOT_MULTI_UNCOR_RCV 0x00000008
674#define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First Fatal */ 674#define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First Fatal */
675#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */ 675#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */
@@ -678,7 +678,7 @@
678 678
679/* Virtual Channel */ 679/* Virtual Channel */
680#define PCI_VC_PORT_REG1 4 680#define PCI_VC_PORT_REG1 4
681#define PCI_VC_REG1_EVCC 0x7 /* extended vc count */ 681#define PCI_VC_REG1_EVCC 0x7 /* extended VC count */
682#define PCI_VC_PORT_REG2 8 682#define PCI_VC_PORT_REG2 8
683#define PCI_VC_REG2_32_PHASE 0x2 683#define PCI_VC_REG2_32_PHASE 0x2
684#define PCI_VC_REG2_64_PHASE 0x4 684#define PCI_VC_REG2_64_PHASE 0x4
@@ -711,7 +711,7 @@
711#define PCI_VNDR_HEADER_LEN(x) (((x) >> 20) & 0xfff) 711#define PCI_VNDR_HEADER_LEN(x) (((x) >> 20) & 0xfff)
712 712
713/* 713/*
714 * Hypertransport sub capability types 714 * HyperTransport sub capability types
715 * 715 *
716 * Unfortunately there are both 3 bit and 5 bit capability types defined 716 * Unfortunately there are both 3 bit and 5 bit capability types defined
717 * in the HT spec, catering for that is a little messy. You probably don't 717 * in the HT spec, catering for that is a little messy. You probably don't
@@ -739,8 +739,8 @@
739#define HT_CAPTYPE_DIRECT_ROUTE 0xB0 /* Direct routing configuration */ 739#define HT_CAPTYPE_DIRECT_ROUTE 0xB0 /* Direct routing configuration */
740#define HT_CAPTYPE_VCSET 0xB8 /* Virtual Channel configuration */ 740#define HT_CAPTYPE_VCSET 0xB8 /* Virtual Channel configuration */
741#define HT_CAPTYPE_ERROR_RETRY 0xC0 /* Retry on error configuration */ 741#define HT_CAPTYPE_ERROR_RETRY 0xC0 /* Retry on error configuration */
742#define HT_CAPTYPE_GEN3 0xD0 /* Generation 3 hypertransport configuration */ 742#define HT_CAPTYPE_GEN3 0xD0 /* Generation 3 HyperTransport configuration */
743#define HT_CAPTYPE_PM 0xE0 /* Hypertransport powermanagement configuration */ 743#define HT_CAPTYPE_PM 0xE0 /* HyperTransport power management configuration */
744#define HT_CAP_SIZEOF_LONG 28 /* slave & primary */ 744#define HT_CAP_SIZEOF_LONG 28 /* slave & primary */
745#define HT_CAP_SIZEOF_SHORT 24 /* host & secondary */ 745#define HT_CAP_SIZEOF_SHORT 24 /* host & secondary */
746 746
@@ -777,14 +777,14 @@
777#define PCI_PRI_ALLOC_REQ 0x0c /* PRI max reqs allowed */ 777#define PCI_PRI_ALLOC_REQ 0x0c /* PRI max reqs allowed */
778#define PCI_EXT_CAP_PRI_SIZEOF 16 778#define PCI_EXT_CAP_PRI_SIZEOF 16
779 779
780/* PASID capability */ 780/* Process Address Space ID */
781#define PCI_PASID_CAP 0x04 /* PASID feature register */ 781#define PCI_PASID_CAP 0x04 /* PASID feature register */
782#define PCI_PASID_CAP_EXEC 0x02 /* Exec permissions Supported */ 782#define PCI_PASID_CAP_EXEC 0x02 /* Exec permissions Supported */
783#define PCI_PASID_CAP_PRIV 0x04 /* Priviledge Mode Supported */ 783#define PCI_PASID_CAP_PRIV 0x04 /* Privilege Mode Supported */
784#define PCI_PASID_CTRL 0x06 /* PASID control register */ 784#define PCI_PASID_CTRL 0x06 /* PASID control register */
785#define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */ 785#define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */
786#define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */ 786#define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */
787#define PCI_PASID_CTRL_PRIV 0x04 /* Priviledge Mode Enable */ 787#define PCI_PASID_CTRL_PRIV 0x04 /* Privilege Mode Enable */
788#define PCI_EXT_CAP_PASID_SIZEOF 8 788#define PCI_EXT_CAP_PASID_SIZEOF 8
789 789
790/* Single Root I/O Virtualization */ 790/* Single Root I/O Virtualization */
@@ -839,22 +839,22 @@
839#define PCI_ACS_CTRL 0x06 /* ACS Control Register */ 839#define PCI_ACS_CTRL 0x06 /* ACS Control Register */
840#define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */ 840#define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */
841 841
842#define PCI_VSEC_HDR 4 /* extended cap - vendor specific */ 842#define PCI_VSEC_HDR 4 /* extended cap - vendor-specific */
843#define PCI_VSEC_HDR_LEN_SHIFT 20 /* shift for length field */ 843#define PCI_VSEC_HDR_LEN_SHIFT 20 /* shift for length field */
844 844
845/* sata capability */ 845/* SATA capability */
846#define PCI_SATA_REGS 4 /* SATA REGs specifier */ 846#define PCI_SATA_REGS 4 /* SATA REGs specifier */
847#define PCI_SATA_REGS_MASK 0xF /* location - BAR#/inline */ 847#define PCI_SATA_REGS_MASK 0xF /* location - BAR#/inline */
848#define PCI_SATA_REGS_INLINE 0xF /* REGS in config space */ 848#define PCI_SATA_REGS_INLINE 0xF /* REGS in config space */
849#define PCI_SATA_SIZEOF_SHORT 8 849#define PCI_SATA_SIZEOF_SHORT 8
850#define PCI_SATA_SIZEOF_LONG 16 850#define PCI_SATA_SIZEOF_LONG 16
851 851
852/* resizable BARs */ 852/* Resizable BARs */
853#define PCI_REBAR_CTRL 8 /* control register */ 853#define PCI_REBAR_CTRL 8 /* control register */
854#define PCI_REBAR_CTRL_NBAR_MASK (7 << 5) /* mask for # bars */ 854#define PCI_REBAR_CTRL_NBAR_MASK (7 << 5) /* mask for # bars */
855#define PCI_REBAR_CTRL_NBAR_SHIFT 5 /* shift for # bars */ 855#define PCI_REBAR_CTRL_NBAR_SHIFT 5 /* shift for # bars */
856 856
857/* dynamic power allocation */ 857/* Dynamic Power Allocation */
858#define PCI_DPA_CAP 4 /* capability register */ 858#define PCI_DPA_CAP 4 /* capability register */
859#define PCI_DPA_CAP_SUBSTATE_MASK 0x1F /* # substates - 1 */ 859#define PCI_DPA_CAP_SUBSTATE_MASK 0x1F /* # substates - 1 */
860#define PCI_DPA_BASE_SIZEOF 16 /* size with 0 substates */ 860#define PCI_DPA_BASE_SIZEOF 16 /* size with 0 substates */
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 307f293477e8..a806687ad98f 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -763,13 +763,14 @@ enum {
763 763
764 TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */ 764 TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */
765 765
766 TCA_FQ_FLOW_DEFAULT_RATE,/* for sockets with unspecified sk_rate, 766 TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */
767 * use the following rate
768 */
769 767
770 TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */ 768 TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */
771 769
772 TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */ 770 TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */
771
772 TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */
773
773 __TCA_FQ_MAX 774 __TCA_FQ_MAX
774}; 775};
775 776
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index fe1a5406d4d9..f7cf7f351144 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -16,6 +16,7 @@
16#define _MD_P_H 16#define _MD_P_H
17 17
18#include <linux/types.h> 18#include <linux/types.h>
19#include <asm/byteorder.h>
19 20
20/* 21/*
21 * RAID superblock. 22 * RAID superblock.
diff --git a/include/uapi/linux/unix_diag.h b/include/uapi/linux/unix_diag.h
index b9e2a6a7446f..1eb0b8dd1830 100644
--- a/include/uapi/linux/unix_diag.h
+++ b/include/uapi/linux/unix_diag.h
@@ -31,6 +31,7 @@ struct unix_diag_msg {
31}; 31};
32 32
33enum { 33enum {
34 /* UNIX_DIAG_NONE, standard nl API requires this attribute! */
34 UNIX_DIAG_NAME, 35 UNIX_DIAG_NAME,
35 UNIX_DIAG_VFS, 36 UNIX_DIAG_VFS,
36 UNIX_DIAG_PEER, 37 UNIX_DIAG_PEER,
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index d630163b9a2e..5759810e1c1b 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -30,7 +30,7 @@
30#include <sound/compress_params.h> 30#include <sound/compress_params.h>
31 31
32 32
33#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 1) 33#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 2)
34/** 34/**
35 * struct snd_compressed_buffer: compressed buffer 35 * struct snd_compressed_buffer: compressed buffer
36 * @fragment_size: size of buffer fragment in bytes 36 * @fragment_size: size of buffer fragment in bytes
@@ -67,8 +67,8 @@ struct snd_compr_params {
67struct snd_compr_tstamp { 67struct snd_compr_tstamp {
68 __u32 byte_offset; 68 __u32 byte_offset;
69 __u32 copied_total; 69 __u32 copied_total;
70 snd_pcm_uframes_t pcm_frames; 70 __u32 pcm_frames;
71 snd_pcm_uframes_t pcm_io_frames; 71 __u32 pcm_io_frames;
72 __u32 sampling_rate; 72 __u32 sampling_rate;
73}; 73};
74 74
diff --git a/init/Kconfig b/init/Kconfig
index 3fc8a2f2fac4..79383d3aa5dc 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -301,20 +301,6 @@ config AUDIT_TREE
301 depends on AUDITSYSCALL 301 depends on AUDITSYSCALL
302 select FSNOTIFY 302 select FSNOTIFY
303 303
304config AUDIT_LOGINUID_IMMUTABLE
305 bool "Make audit loginuid immutable"
306 depends on AUDIT
307 help
308 The config option toggles if a task setting its loginuid requires
309 CAP_SYS_AUDITCONTROL or if that task should require no special permissions
310 but should instead only allow setting its loginuid if it was never
311 previously set. On systems which use systemd or a similar central
312 process to restart login services this should be set to true. On older
313 systems in which an admin would typically have to directly stop and
314 start processes this should be set to false. Setting this to true allows
315 one to drop potentially dangerous capabilites from the login tasks,
316 but may not be backwards compatible with older init systems.
317
318source "kernel/irq/Kconfig" 304source "kernel/irq/Kconfig"
319source "kernel/time/Kconfig" 305source "kernel/time/Kconfig"
320 306
@@ -1669,6 +1655,18 @@ config BASE_SMALL
1669 default 0 if BASE_FULL 1655 default 0 if BASE_FULL
1670 default 1 if !BASE_FULL 1656 default 1 if !BASE_FULL
1671 1657
1658config SYSTEM_TRUSTED_KEYRING
1659 bool "Provide system-wide ring of trusted keys"
1660 depends on KEYS
1661 help
1662 Provide a system keyring to which trusted keys can be added. Keys in
1663 the keyring are considered to be trusted. Keys may be added at will
1664 by the kernel from compiled-in data and from hardware key stores, but
1665 userspace may only add extra keys if those keys can be verified by
1666 keys already in the keyring.
1667
1668 Keys in this keyring are used by module signature checking.
1669
1672menuconfig MODULES 1670menuconfig MODULES
1673 bool "Enable loadable module support" 1671 bool "Enable loadable module support"
1674 option modules 1672 option modules
@@ -1742,6 +1740,7 @@ config MODULE_SRCVERSION_ALL
1742config MODULE_SIG 1740config MODULE_SIG
1743 bool "Module signature verification" 1741 bool "Module signature verification"
1744 depends on MODULES 1742 depends on MODULES
1743 select SYSTEM_TRUSTED_KEYRING
1745 select KEYS 1744 select KEYS
1746 select CRYPTO 1745 select CRYPTO
1747 select ASYMMETRIC_KEY_TYPE 1746 select ASYMMETRIC_KEY_TYPE
diff --git a/init/main.c b/init/main.c
index 01573fdfa186..febc511e078a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -476,7 +476,7 @@ static void __init mm_init(void)
476 mem_init(); 476 mem_init();
477 kmem_cache_init(); 477 kmem_cache_init();
478 percpu_init_late(); 478 percpu_init_late();
479 pgtable_init(); 479 pgtable_cache_init();
480 vmalloc_init(); 480 vmalloc_init();
481} 481}
482 482
diff --git a/ipc/shm.c b/ipc/shm.c
index d69739610fd4..7a51443a51d6 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -208,15 +208,18 @@ static void shm_open(struct vm_area_struct *vma)
208 */ 208 */
209static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) 209static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
210{ 210{
211 struct file *shm_file;
212
213 shm_file = shp->shm_file;
214 shp->shm_file = NULL;
211 ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; 215 ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
212 shm_rmid(ns, shp); 216 shm_rmid(ns, shp);
213 shm_unlock(shp); 217 shm_unlock(shp);
214 if (!is_file_hugepages(shp->shm_file)) 218 if (!is_file_hugepages(shm_file))
215 shmem_lock(shp->shm_file, 0, shp->mlock_user); 219 shmem_lock(shm_file, 0, shp->mlock_user);
216 else if (shp->mlock_user) 220 else if (shp->mlock_user)
217 user_shm_unlock(file_inode(shp->shm_file)->i_size, 221 user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user);
218 shp->mlock_user); 222 fput(shm_file);
219 fput (shp->shm_file);
220 ipc_rcu_putref(shp, shm_rcu_free); 223 ipc_rcu_putref(shp, shm_rcu_free);
221} 224}
222 225
@@ -974,15 +977,25 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
974 ipc_lock_object(&shp->shm_perm); 977 ipc_lock_object(&shp->shm_perm);
975 if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) { 978 if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
976 kuid_t euid = current_euid(); 979 kuid_t euid = current_euid();
977 err = -EPERM;
978 if (!uid_eq(euid, shp->shm_perm.uid) && 980 if (!uid_eq(euid, shp->shm_perm.uid) &&
979 !uid_eq(euid, shp->shm_perm.cuid)) 981 !uid_eq(euid, shp->shm_perm.cuid)) {
982 err = -EPERM;
980 goto out_unlock0; 983 goto out_unlock0;
981 if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) 984 }
985 if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
986 err = -EPERM;
982 goto out_unlock0; 987 goto out_unlock0;
988 }
983 } 989 }
984 990
985 shm_file = shp->shm_file; 991 shm_file = shp->shm_file;
992
993 /* check if shm_destroy() is tearing down shp */
994 if (shm_file == NULL) {
995 err = -EIDRM;
996 goto out_unlock0;
997 }
998
986 if (is_file_hugepages(shm_file)) 999 if (is_file_hugepages(shm_file))
987 goto out_unlock0; 1000 goto out_unlock0;
988 1001
@@ -1101,6 +1114,14 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
1101 goto out_unlock; 1114 goto out_unlock;
1102 1115
1103 ipc_lock_object(&shp->shm_perm); 1116 ipc_lock_object(&shp->shm_perm);
1117
1118 /* check if shm_destroy() is tearing down shp */
1119 if (shp->shm_file == NULL) {
1120 ipc_unlock_object(&shp->shm_perm);
1121 err = -EIDRM;
1122 goto out_unlock;
1123 }
1124
1104 path = shp->shm_file->f_path; 1125 path = shp->shm_file->f_path;
1105 path_get(&path); 1126 path_get(&path);
1106 shp->shm_nattch++; 1127 shp->shm_nattch++;
diff --git a/kernel/.gitignore b/kernel/.gitignore
index b3097bde4e9c..790d83c7d160 100644
--- a/kernel/.gitignore
+++ b/kernel/.gitignore
@@ -5,3 +5,4 @@ config_data.h
5config_data.gz 5config_data.gz
6timeconst.h 6timeconst.h
7hz.bc 7hz.bc
8x509_certificate_list
diff --git a/kernel/Makefile b/kernel/Makefile
index 09a9c94f42bd..bbaf7d59c1bb 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -41,8 +41,9 @@ ifneq ($(CONFIG_SMP),y)
41obj-y += up.o 41obj-y += up.o
42endif 42endif
43obj-$(CONFIG_UID16) += uid16.o 43obj-$(CONFIG_UID16) += uid16.o
44obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o
44obj-$(CONFIG_MODULES) += module.o 45obj-$(CONFIG_MODULES) += module.o
45obj-$(CONFIG_MODULE_SIG) += module_signing.o modsign_pubkey.o modsign_certificate.o 46obj-$(CONFIG_MODULE_SIG) += module_signing.o
46obj-$(CONFIG_KALLSYMS) += kallsyms.o 47obj-$(CONFIG_KALLSYMS) += kallsyms.o
47obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o 48obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
48obj-$(CONFIG_KEXEC) += kexec.o 49obj-$(CONFIG_KEXEC) += kexec.o
@@ -122,19 +123,52 @@ targets += timeconst.h
122$(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE 123$(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
123 $(call if_changed,bc) 124 $(call if_changed,bc)
124 125
125ifeq ($(CONFIG_MODULE_SIG),y) 126###############################################################################
127#
128# Roll all the X.509 certificates that we can find together and pull them into
129# the kernel so that they get loaded into the system trusted keyring during
130# boot.
126# 131#
127# Pull the signing certificate and any extra certificates into the kernel 132# We look in the source root and the build root for all files whose name ends
133# in ".x509". Unfortunately, this will generate duplicate filenames, so we
134# have make canonicalise the pathnames and then sort them to discard the
135# duplicates.
128# 136#
137###############################################################################
138ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y)
139X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509)
140X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += signing_key.x509
141X509_CERTIFICATES := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \
142 $(or $(realpath $(CERT)),$(CERT))))
143
144ifeq ($(X509_CERTIFICATES),)
145$(warning *** No X.509 certificates found ***)
146endif
147
148ifneq ($(wildcard $(obj)/.x509.list),)
149ifneq ($(shell cat $(obj)/.x509.list),$(X509_CERTIFICATES))
150$(info X.509 certificate list changed)
151$(shell rm $(obj)/.x509.list)
152endif
153endif
154
155kernel/system_certificates.o: $(obj)/x509_certificate_list
129 156
130quiet_cmd_touch = TOUCH $@ 157quiet_cmd_x509certs = CERTS $@
131 cmd_touch = touch $@ 158 cmd_x509certs = cat $(X509_CERTIFICATES) /dev/null >$@ $(foreach X509,$(X509_CERTIFICATES),; echo " - Including cert $(X509)")
132 159
133extra_certificates: 160targets += $(obj)/x509_certificate_list
134 $(call cmd,touch) 161$(obj)/x509_certificate_list: $(X509_CERTIFICATES) $(obj)/.x509.list
162 $(call if_changed,x509certs)
135 163
136kernel/modsign_certificate.o: signing_key.x509 extra_certificates 164targets += $(obj)/.x509.list
165$(obj)/.x509.list:
166 @echo $(X509_CERTIFICATES) >$@
137 167
168clean-files := x509_certificate_list .x509.list
169endif
170
171ifeq ($(CONFIG_MODULE_SIG),y)
138############################################################################### 172###############################################################################
139# 173#
140# If module signing is requested, say by allyesconfig, but a key has not been 174# If module signing is requested, say by allyesconfig, but a key has not been
diff --git a/kernel/audit.c b/kernel/audit.c
index 7b0e23a740ce..906ae5a0233a 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -60,7 +60,6 @@
60#ifdef CONFIG_SECURITY 60#ifdef CONFIG_SECURITY
61#include <linux/security.h> 61#include <linux/security.h>
62#endif 62#endif
63#include <net/netlink.h>
64#include <linux/freezer.h> 63#include <linux/freezer.h>
65#include <linux/tty.h> 64#include <linux/tty.h>
66#include <linux/pid_namespace.h> 65#include <linux/pid_namespace.h>
@@ -140,6 +139,17 @@ static struct task_struct *kauditd_task;
140static DECLARE_WAIT_QUEUE_HEAD(kauditd_wait); 139static DECLARE_WAIT_QUEUE_HEAD(kauditd_wait);
141static DECLARE_WAIT_QUEUE_HEAD(audit_backlog_wait); 140static DECLARE_WAIT_QUEUE_HEAD(audit_backlog_wait);
142 141
142static struct audit_features af = {.vers = AUDIT_FEATURE_VERSION,
143 .mask = -1,
144 .features = 0,
145 .lock = 0,};
146
147static char *audit_feature_names[2] = {
148 "only_unset_loginuid",
149 "loginuid_immutable",
150};
151
152
143/* Serialize requests from userspace. */ 153/* Serialize requests from userspace. */
144DEFINE_MUTEX(audit_cmd_mutex); 154DEFINE_MUTEX(audit_cmd_mutex);
145 155
@@ -584,6 +594,8 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
584 return -EOPNOTSUPP; 594 return -EOPNOTSUPP;
585 case AUDIT_GET: 595 case AUDIT_GET:
586 case AUDIT_SET: 596 case AUDIT_SET:
597 case AUDIT_GET_FEATURE:
598 case AUDIT_SET_FEATURE:
587 case AUDIT_LIST_RULES: 599 case AUDIT_LIST_RULES:
588 case AUDIT_ADD_RULE: 600 case AUDIT_ADD_RULE:
589 case AUDIT_DEL_RULE: 601 case AUDIT_DEL_RULE:
@@ -613,7 +625,7 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
613 int rc = 0; 625 int rc = 0;
614 uid_t uid = from_kuid(&init_user_ns, current_uid()); 626 uid_t uid = from_kuid(&init_user_ns, current_uid());
615 627
616 if (!audit_enabled) { 628 if (!audit_enabled && msg_type != AUDIT_USER_AVC) {
617 *ab = NULL; 629 *ab = NULL;
618 return rc; 630 return rc;
619 } 631 }
@@ -628,6 +640,94 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
628 return rc; 640 return rc;
629} 641}
630 642
643int is_audit_feature_set(int i)
644{
645 return af.features & AUDIT_FEATURE_TO_MASK(i);
646}
647
648
649static int audit_get_feature(struct sk_buff *skb)
650{
651 u32 seq;
652
653 seq = nlmsg_hdr(skb)->nlmsg_seq;
654
655 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
656 &af, sizeof(af));
657
658 return 0;
659}
660
661static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature,
662 u32 old_lock, u32 new_lock, int res)
663{
664 struct audit_buffer *ab;
665
666 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE);
667 audit_log_format(ab, "feature=%s new=%d old=%d old_lock=%d new_lock=%d res=%d",
668 audit_feature_names[which], !!old_feature, !!new_feature,
669 !!old_lock, !!new_lock, res);
670 audit_log_end(ab);
671}
672
673static int audit_set_feature(struct sk_buff *skb)
674{
675 struct audit_features *uaf;
676 int i;
677
678 BUILD_BUG_ON(AUDIT_LAST_FEATURE + 1 > sizeof(audit_feature_names)/sizeof(audit_feature_names[0]));
679 uaf = nlmsg_data(nlmsg_hdr(skb));
680
681 /* if there is ever a version 2 we should handle that here */
682
683 for (i = 0; i <= AUDIT_LAST_FEATURE; i++) {
684 u32 feature = AUDIT_FEATURE_TO_MASK(i);
685 u32 old_feature, new_feature, old_lock, new_lock;
686
687 /* if we are not changing this feature, move along */
688 if (!(feature & uaf->mask))
689 continue;
690
691 old_feature = af.features & feature;
692 new_feature = uaf->features & feature;
693 new_lock = (uaf->lock | af.lock) & feature;
694 old_lock = af.lock & feature;
695
696 /* are we changing a locked feature? */
697 if ((af.lock & feature) && (new_feature != old_feature)) {
698 audit_log_feature_change(i, old_feature, new_feature,
699 old_lock, new_lock, 0);
700 return -EPERM;
701 }
702 }
703 /* nothing invalid, do the changes */
704 for (i = 0; i <= AUDIT_LAST_FEATURE; i++) {
705 u32 feature = AUDIT_FEATURE_TO_MASK(i);
706 u32 old_feature, new_feature, old_lock, new_lock;
707
708 /* if we are not changing this feature, move along */
709 if (!(feature & uaf->mask))
710 continue;
711
712 old_feature = af.features & feature;
713 new_feature = uaf->features & feature;
714 old_lock = af.lock & feature;
715 new_lock = (uaf->lock | af.lock) & feature;
716
717 if (new_feature != old_feature)
718 audit_log_feature_change(i, old_feature, new_feature,
719 old_lock, new_lock, 1);
720
721 if (new_feature)
722 af.features |= feature;
723 else
724 af.features &= ~feature;
725 af.lock |= new_lock;
726 }
727
728 return 0;
729}
730
631static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 731static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
632{ 732{
633 u32 seq; 733 u32 seq;
@@ -659,6 +759,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
659 759
660 switch (msg_type) { 760 switch (msg_type) {
661 case AUDIT_GET: 761 case AUDIT_GET:
762 memset(&status_set, 0, sizeof(status_set));
662 status_set.enabled = audit_enabled; 763 status_set.enabled = audit_enabled;
663 status_set.failure = audit_failure; 764 status_set.failure = audit_failure;
664 status_set.pid = audit_pid; 765 status_set.pid = audit_pid;
@@ -670,7 +771,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
670 &status_set, sizeof(status_set)); 771 &status_set, sizeof(status_set));
671 break; 772 break;
672 case AUDIT_SET: 773 case AUDIT_SET:
673 if (nlh->nlmsg_len < sizeof(struct audit_status)) 774 if (nlmsg_len(nlh) < sizeof(struct audit_status))
674 return -EINVAL; 775 return -EINVAL;
675 status_get = (struct audit_status *)data; 776 status_get = (struct audit_status *)data;
676 if (status_get->mask & AUDIT_STATUS_ENABLED) { 777 if (status_get->mask & AUDIT_STATUS_ENABLED) {
@@ -699,6 +800,16 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
699 if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT) 800 if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT)
700 err = audit_set_backlog_limit(status_get->backlog_limit); 801 err = audit_set_backlog_limit(status_get->backlog_limit);
701 break; 802 break;
803 case AUDIT_GET_FEATURE:
804 err = audit_get_feature(skb);
805 if (err)
806 return err;
807 break;
808 case AUDIT_SET_FEATURE:
809 err = audit_set_feature(skb);
810 if (err)
811 return err;
812 break;
702 case AUDIT_USER: 813 case AUDIT_USER:
703 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG: 814 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
704 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2: 815 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
@@ -715,7 +826,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
715 } 826 }
716 audit_log_common_recv_msg(&ab, msg_type); 827 audit_log_common_recv_msg(&ab, msg_type);
717 if (msg_type != AUDIT_USER_TTY) 828 if (msg_type != AUDIT_USER_TTY)
718 audit_log_format(ab, " msg='%.1024s'", 829 audit_log_format(ab, " msg='%.*s'",
830 AUDIT_MESSAGE_TEXT_MAX,
719 (char *)data); 831 (char *)data);
720 else { 832 else {
721 int size; 833 int size;
@@ -818,7 +930,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
818 struct task_struct *tsk = current; 930 struct task_struct *tsk = current;
819 931
820 spin_lock(&tsk->sighand->siglock); 932 spin_lock(&tsk->sighand->siglock);
821 s.enabled = tsk->signal->audit_tty != 0; 933 s.enabled = tsk->signal->audit_tty;
822 s.log_passwd = tsk->signal->audit_tty_log_passwd; 934 s.log_passwd = tsk->signal->audit_tty_log_passwd;
823 spin_unlock(&tsk->sighand->siglock); 935 spin_unlock(&tsk->sighand->siglock);
824 936
@@ -832,7 +944,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
832 944
833 memset(&s, 0, sizeof(s)); 945 memset(&s, 0, sizeof(s));
834 /* guard against past and future API changes */ 946 /* guard against past and future API changes */
835 memcpy(&s, data, min(sizeof(s), (size_t)nlh->nlmsg_len)); 947 memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh)));
836 if ((s.enabled != 0 && s.enabled != 1) || 948 if ((s.enabled != 0 && s.enabled != 1) ||
837 (s.log_passwd != 0 && s.log_passwd != 1)) 949 (s.log_passwd != 0 && s.log_passwd != 1))
838 return -EINVAL; 950 return -EINVAL;
@@ -1067,13 +1179,6 @@ static void wait_for_auditd(unsigned long sleep_time)
1067 remove_wait_queue(&audit_backlog_wait, &wait); 1179 remove_wait_queue(&audit_backlog_wait, &wait);
1068} 1180}
1069 1181
1070/* Obtain an audit buffer. This routine does locking to obtain the
1071 * audit buffer, but then no locking is required for calls to
1072 * audit_log_*format. If the tsk is a task that is currently in a
1073 * syscall, then the syscall is marked as auditable and an audit record
1074 * will be written at syscall exit. If there is no associated task, tsk
1075 * should be NULL. */
1076
1077/** 1182/**
1078 * audit_log_start - obtain an audit buffer 1183 * audit_log_start - obtain an audit buffer
1079 * @ctx: audit_context (may be NULL) 1184 * @ctx: audit_context (may be NULL)
@@ -1389,7 +1494,7 @@ void audit_log_session_info(struct audit_buffer *ab)
1389 u32 sessionid = audit_get_sessionid(current); 1494 u32 sessionid = audit_get_sessionid(current);
1390 uid_t auid = from_kuid(&init_user_ns, audit_get_loginuid(current)); 1495 uid_t auid = from_kuid(&init_user_ns, audit_get_loginuid(current));
1391 1496
1392 audit_log_format(ab, " auid=%u ses=%u\n", auid, sessionid); 1497 audit_log_format(ab, " auid=%u ses=%u", auid, sessionid);
1393} 1498}
1394 1499
1395void audit_log_key(struct audit_buffer *ab, char *key) 1500void audit_log_key(struct audit_buffer *ab, char *key)
@@ -1536,6 +1641,26 @@ void audit_log_name(struct audit_context *context, struct audit_names *n,
1536 } 1641 }
1537 } 1642 }
1538 1643
1644 /* log the audit_names record type */
1645 audit_log_format(ab, " nametype=");
1646 switch(n->type) {
1647 case AUDIT_TYPE_NORMAL:
1648 audit_log_format(ab, "NORMAL");
1649 break;
1650 case AUDIT_TYPE_PARENT:
1651 audit_log_format(ab, "PARENT");
1652 break;
1653 case AUDIT_TYPE_CHILD_DELETE:
1654 audit_log_format(ab, "DELETE");
1655 break;
1656 case AUDIT_TYPE_CHILD_CREATE:
1657 audit_log_format(ab, "CREATE");
1658 break;
1659 default:
1660 audit_log_format(ab, "UNKNOWN");
1661 break;
1662 }
1663
1539 audit_log_fcaps(ab, n); 1664 audit_log_fcaps(ab, n);
1540 audit_log_end(ab); 1665 audit_log_end(ab);
1541} 1666}
diff --git a/kernel/audit.h b/kernel/audit.h
index 123c9b7c3979..b779642b29af 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -197,6 +197,9 @@ struct audit_context {
197 int fd; 197 int fd;
198 int flags; 198 int flags;
199 } mmap; 199 } mmap;
200 struct {
201 int argc;
202 } execve;
200 }; 203 };
201 int fds[2]; 204 int fds[2];
202 205
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index f7aee8be7fb2..51f3fd4c1ed3 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -343,6 +343,7 @@ static int audit_field_valid(struct audit_entry *entry, struct audit_field *f)
343 case AUDIT_DEVMINOR: 343 case AUDIT_DEVMINOR:
344 case AUDIT_EXIT: 344 case AUDIT_EXIT:
345 case AUDIT_SUCCESS: 345 case AUDIT_SUCCESS:
346 case AUDIT_INODE:
346 /* bit ops are only useful on syscall args */ 347 /* bit ops are only useful on syscall args */
347 if (f->op == Audit_bitmask || f->op == Audit_bittest) 348 if (f->op == Audit_bitmask || f->op == Audit_bittest)
348 return -EINVAL; 349 return -EINVAL;
@@ -423,7 +424,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
423 f->lsm_rule = NULL; 424 f->lsm_rule = NULL;
424 425
425 /* Support legacy tests for a valid loginuid */ 426 /* Support legacy tests for a valid loginuid */
426 if ((f->type == AUDIT_LOGINUID) && (f->val == ~0U)) { 427 if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) {
427 f->type = AUDIT_LOGINUID_SET; 428 f->type = AUDIT_LOGINUID_SET;
428 f->val = 0; 429 f->val = 0;
429 } 430 }
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 9845cb32b60a..90594c9f7552 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -95,13 +95,6 @@ struct audit_aux_data {
95/* Number of target pids per aux struct. */ 95/* Number of target pids per aux struct. */
96#define AUDIT_AUX_PIDS 16 96#define AUDIT_AUX_PIDS 16
97 97
98struct audit_aux_data_execve {
99 struct audit_aux_data d;
100 int argc;
101 int envc;
102 struct mm_struct *mm;
103};
104
105struct audit_aux_data_pids { 98struct audit_aux_data_pids {
106 struct audit_aux_data d; 99 struct audit_aux_data d;
107 pid_t target_pid[AUDIT_AUX_PIDS]; 100 pid_t target_pid[AUDIT_AUX_PIDS];
@@ -121,12 +114,6 @@ struct audit_aux_data_bprm_fcaps {
121 struct audit_cap_data new_pcap; 114 struct audit_cap_data new_pcap;
122}; 115};
123 116
124struct audit_aux_data_capset {
125 struct audit_aux_data d;
126 pid_t pid;
127 struct audit_cap_data cap;
128};
129
130struct audit_tree_refs { 117struct audit_tree_refs {
131 struct audit_tree_refs *next; 118 struct audit_tree_refs *next;
132 struct audit_chunk *c[31]; 119 struct audit_chunk *c[31];
@@ -566,7 +553,7 @@ static int audit_filter_rules(struct task_struct *tsk,
566 break; 553 break;
567 case AUDIT_INODE: 554 case AUDIT_INODE:
568 if (name) 555 if (name)
569 result = (name->ino == f->val); 556 result = audit_comparator(name->ino, f->op, f->val);
570 else if (ctx) { 557 else if (ctx) {
571 list_for_each_entry(n, &ctx->names_list, list) { 558 list_for_each_entry(n, &ctx->names_list, list) {
572 if (audit_comparator(n->ino, f->op, f->val)) { 559 if (audit_comparator(n->ino, f->op, f->val)) {
@@ -943,8 +930,10 @@ int audit_alloc(struct task_struct *tsk)
943 return 0; /* Return if not auditing. */ 930 return 0; /* Return if not auditing. */
944 931
945 state = audit_filter_task(tsk, &key); 932 state = audit_filter_task(tsk, &key);
946 if (state == AUDIT_DISABLED) 933 if (state == AUDIT_DISABLED) {
934 clear_tsk_thread_flag(tsk, TIF_SYSCALL_AUDIT);
947 return 0; 935 return 0;
936 }
948 937
949 if (!(context = audit_alloc_context(state))) { 938 if (!(context = audit_alloc_context(state))) {
950 kfree(key); 939 kfree(key);
@@ -1149,20 +1138,16 @@ static int audit_log_single_execve_arg(struct audit_context *context,
1149} 1138}
1150 1139
1151static void audit_log_execve_info(struct audit_context *context, 1140static void audit_log_execve_info(struct audit_context *context,
1152 struct audit_buffer **ab, 1141 struct audit_buffer **ab)
1153 struct audit_aux_data_execve *axi)
1154{ 1142{
1155 int i, len; 1143 int i, len;
1156 size_t len_sent = 0; 1144 size_t len_sent = 0;
1157 const char __user *p; 1145 const char __user *p;
1158 char *buf; 1146 char *buf;
1159 1147
1160 if (axi->mm != current->mm) 1148 p = (const char __user *)current->mm->arg_start;
1161 return; /* execve failed, no additional info */
1162
1163 p = (const char __user *)axi->mm->arg_start;
1164 1149
1165 audit_log_format(*ab, "argc=%d", axi->argc); 1150 audit_log_format(*ab, "argc=%d", context->execve.argc);
1166 1151
1167 /* 1152 /*
1168 * we need some kernel buffer to hold the userspace args. Just 1153 * we need some kernel buffer to hold the userspace args. Just
@@ -1176,7 +1161,7 @@ static void audit_log_execve_info(struct audit_context *context,
1176 return; 1161 return;
1177 } 1162 }
1178 1163
1179 for (i = 0; i < axi->argc; i++) { 1164 for (i = 0; i < context->execve.argc; i++) {
1180 len = audit_log_single_execve_arg(context, ab, i, 1165 len = audit_log_single_execve_arg(context, ab, i,
1181 &len_sent, p, buf); 1166 &len_sent, p, buf);
1182 if (len <= 0) 1167 if (len <= 0)
@@ -1279,6 +1264,9 @@ static void show_special(struct audit_context *context, int *call_panic)
1279 audit_log_format(ab, "fd=%d flags=0x%x", context->mmap.fd, 1264 audit_log_format(ab, "fd=%d flags=0x%x", context->mmap.fd,
1280 context->mmap.flags); 1265 context->mmap.flags);
1281 break; } 1266 break; }
1267 case AUDIT_EXECVE: {
1268 audit_log_execve_info(context, &ab);
1269 break; }
1282 } 1270 }
1283 audit_log_end(ab); 1271 audit_log_end(ab);
1284} 1272}
@@ -1325,11 +1313,6 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
1325 1313
1326 switch (aux->type) { 1314 switch (aux->type) {
1327 1315
1328 case AUDIT_EXECVE: {
1329 struct audit_aux_data_execve *axi = (void *)aux;
1330 audit_log_execve_info(context, &ab, axi);
1331 break; }
1332
1333 case AUDIT_BPRM_FCAPS: { 1316 case AUDIT_BPRM_FCAPS: {
1334 struct audit_aux_data_bprm_fcaps *axs = (void *)aux; 1317 struct audit_aux_data_bprm_fcaps *axs = (void *)aux;
1335 audit_log_format(ab, "fver=%x", axs->fcap_ver); 1318 audit_log_format(ab, "fver=%x", axs->fcap_ver);
@@ -1964,6 +1947,43 @@ int auditsc_get_stamp(struct audit_context *ctx,
1964/* global counter which is incremented every time something logs in */ 1947/* global counter which is incremented every time something logs in */
1965static atomic_t session_id = ATOMIC_INIT(0); 1948static atomic_t session_id = ATOMIC_INIT(0);
1966 1949
1950static int audit_set_loginuid_perm(kuid_t loginuid)
1951{
1952 /* if we are unset, we don't need privs */
1953 if (!audit_loginuid_set(current))
1954 return 0;
1955 /* if AUDIT_FEATURE_LOGINUID_IMMUTABLE means never ever allow a change*/
1956 if (is_audit_feature_set(AUDIT_FEATURE_LOGINUID_IMMUTABLE))
1957 return -EPERM;
1958 /* it is set, you need permission */
1959 if (!capable(CAP_AUDIT_CONTROL))
1960 return -EPERM;
1961 /* reject if this is not an unset and we don't allow that */
1962 if (is_audit_feature_set(AUDIT_FEATURE_ONLY_UNSET_LOGINUID) && uid_valid(loginuid))
1963 return -EPERM;
1964 return 0;
1965}
1966
1967static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid,
1968 unsigned int oldsessionid, unsigned int sessionid,
1969 int rc)
1970{
1971 struct audit_buffer *ab;
1972 uid_t uid, ologinuid, nloginuid;
1973
1974 uid = from_kuid(&init_user_ns, task_uid(current));
1975 ologinuid = from_kuid(&init_user_ns, koldloginuid);
1976 nloginuid = from_kuid(&init_user_ns, kloginuid),
1977
1978 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
1979 if (!ab)
1980 return;
1981 audit_log_format(ab, "pid=%d uid=%u old auid=%u new auid=%u old "
1982 "ses=%u new ses=%u res=%d", current->pid, uid, ologinuid,
1983 nloginuid, oldsessionid, sessionid, !rc);
1984 audit_log_end(ab);
1985}
1986
1967/** 1987/**
1968 * audit_set_loginuid - set current task's audit_context loginuid 1988 * audit_set_loginuid - set current task's audit_context loginuid
1969 * @loginuid: loginuid value 1989 * @loginuid: loginuid value
@@ -1975,37 +1995,26 @@ static atomic_t session_id = ATOMIC_INIT(0);
1975int audit_set_loginuid(kuid_t loginuid) 1995int audit_set_loginuid(kuid_t loginuid)
1976{ 1996{
1977 struct task_struct *task = current; 1997 struct task_struct *task = current;
1978 struct audit_context *context = task->audit_context; 1998 unsigned int oldsessionid, sessionid = (unsigned int)-1;
1979 unsigned int sessionid; 1999 kuid_t oldloginuid;
2000 int rc;
1980 2001
1981#ifdef CONFIG_AUDIT_LOGINUID_IMMUTABLE 2002 oldloginuid = audit_get_loginuid(current);
1982 if (audit_loginuid_set(task)) 2003 oldsessionid = audit_get_sessionid(current);
1983 return -EPERM;
1984#else /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
1985 if (!capable(CAP_AUDIT_CONTROL))
1986 return -EPERM;
1987#endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
1988 2004
1989 sessionid = atomic_inc_return(&session_id); 2005 rc = audit_set_loginuid_perm(loginuid);
1990 if (context && context->in_syscall) { 2006 if (rc)
1991 struct audit_buffer *ab; 2007 goto out;
2008
2009 /* are we setting or clearing? */
2010 if (uid_valid(loginuid))
2011 sessionid = atomic_inc_return(&session_id);
1992 2012
1993 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
1994 if (ab) {
1995 audit_log_format(ab, "login pid=%d uid=%u "
1996 "old auid=%u new auid=%u"
1997 " old ses=%u new ses=%u",
1998 task->pid,
1999 from_kuid(&init_user_ns, task_uid(task)),
2000 from_kuid(&init_user_ns, task->loginuid),
2001 from_kuid(&init_user_ns, loginuid),
2002 task->sessionid, sessionid);
2003 audit_log_end(ab);
2004 }
2005 }
2006 task->sessionid = sessionid; 2013 task->sessionid = sessionid;
2007 task->loginuid = loginuid; 2014 task->loginuid = loginuid;
2008 return 0; 2015out:
2016 audit_log_set_loginuid(oldloginuid, loginuid, oldsessionid, sessionid, rc);
2017 return rc;
2009} 2018}
2010 2019
2011/** 2020/**
@@ -2126,22 +2135,12 @@ void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mo
2126 context->ipc.has_perm = 1; 2135 context->ipc.has_perm = 1;
2127} 2136}
2128 2137
2129int __audit_bprm(struct linux_binprm *bprm) 2138void __audit_bprm(struct linux_binprm *bprm)
2130{ 2139{
2131 struct audit_aux_data_execve *ax;
2132 struct audit_context *context = current->audit_context; 2140 struct audit_context *context = current->audit_context;
2133 2141
2134 ax = kmalloc(sizeof(*ax), GFP_KERNEL); 2142 context->type = AUDIT_EXECVE;
2135 if (!ax) 2143 context->execve.argc = bprm->argc;
2136 return -ENOMEM;
2137
2138 ax->argc = bprm->argc;
2139 ax->envc = bprm->envc;
2140 ax->mm = bprm->mm;
2141 ax->d.type = AUDIT_EXECVE;
2142 ax->d.next = context->aux;
2143 context->aux = (void *)ax;
2144 return 0;
2145} 2144}
2146 2145
2147 2146
diff --git a/kernel/bounds.c b/kernel/bounds.c
index 578782ef6ae1..5253204afdca 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -11,7 +11,7 @@
11#include <linux/kbuild.h> 11#include <linux/kbuild.h>
12#include <linux/page_cgroup.h> 12#include <linux/page_cgroup.h>
13#include <linux/log2.h> 13#include <linux/log2.h>
14#include <linux/spinlock.h> 14#include <linux/spinlock_types.h>
15 15
16void foo(void) 16void foo(void)
17{ 17{
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e0839bcd48c8..8b729c278b64 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -90,6 +90,14 @@ static DEFINE_MUTEX(cgroup_mutex);
90static DEFINE_MUTEX(cgroup_root_mutex); 90static DEFINE_MUTEX(cgroup_root_mutex);
91 91
92/* 92/*
93 * cgroup destruction makes heavy use of work items and there can be a lot
94 * of concurrent destructions. Use a separate workqueue so that cgroup
95 * destruction work items don't end up filling up max_active of system_wq
96 * which may lead to deadlock.
97 */
98static struct workqueue_struct *cgroup_destroy_wq;
99
100/*
93 * Generate an array of cgroup subsystem pointers. At boot time, this is 101 * Generate an array of cgroup subsystem pointers. At boot time, this is
94 * populated with the built in subsystems, and modular subsystems are 102 * populated with the built in subsystems, and modular subsystems are
95 * registered after that. The mutable section of this array is protected by 103 * registered after that. The mutable section of this array is protected by
@@ -191,6 +199,7 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp);
191static int cgroup_destroy_locked(struct cgroup *cgrp); 199static int cgroup_destroy_locked(struct cgroup *cgrp);
192static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], 200static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
193 bool is_add); 201 bool is_add);
202static int cgroup_file_release(struct inode *inode, struct file *file);
194 203
195/** 204/**
196 * cgroup_css - obtain a cgroup's css for the specified subsystem 205 * cgroup_css - obtain a cgroup's css for the specified subsystem
@@ -871,7 +880,7 @@ static void cgroup_free_rcu(struct rcu_head *head)
871 struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head); 880 struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head);
872 881
873 INIT_WORK(&cgrp->destroy_work, cgroup_free_fn); 882 INIT_WORK(&cgrp->destroy_work, cgroup_free_fn);
874 schedule_work(&cgrp->destroy_work); 883 queue_work(cgroup_destroy_wq, &cgrp->destroy_work);
875} 884}
876 885
877static void cgroup_diput(struct dentry *dentry, struct inode *inode) 886static void cgroup_diput(struct dentry *dentry, struct inode *inode)
@@ -895,11 +904,6 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
895 iput(inode); 904 iput(inode);
896} 905}
897 906
898static int cgroup_delete(const struct dentry *d)
899{
900 return 1;
901}
902
903static void remove_dir(struct dentry *d) 907static void remove_dir(struct dentry *d)
904{ 908{
905 struct dentry *parent = dget(d->d_parent); 909 struct dentry *parent = dget(d->d_parent);
@@ -1486,7 +1490,7 @@ static int cgroup_get_rootdir(struct super_block *sb)
1486{ 1490{
1487 static const struct dentry_operations cgroup_dops = { 1491 static const struct dentry_operations cgroup_dops = {
1488 .d_iput = cgroup_diput, 1492 .d_iput = cgroup_diput,
1489 .d_delete = cgroup_delete, 1493 .d_delete = always_delete_dentry,
1490 }; 1494 };
1491 1495
1492 struct inode *inode = 1496 struct inode *inode =
@@ -2426,7 +2430,7 @@ static const struct file_operations cgroup_seqfile_operations = {
2426 .read = seq_read, 2430 .read = seq_read,
2427 .write = cgroup_file_write, 2431 .write = cgroup_file_write,
2428 .llseek = seq_lseek, 2432 .llseek = seq_lseek,
2429 .release = single_release, 2433 .release = cgroup_file_release,
2430}; 2434};
2431 2435
2432static int cgroup_file_open(struct inode *inode, struct file *file) 2436static int cgroup_file_open(struct inode *inode, struct file *file)
@@ -2487,6 +2491,8 @@ static int cgroup_file_release(struct inode *inode, struct file *file)
2487 ret = cft->release(inode, file); 2491 ret = cft->release(inode, file);
2488 if (css->ss) 2492 if (css->ss)
2489 css_put(css); 2493 css_put(css);
2494 if (file->f_op == &cgroup_seqfile_operations)
2495 single_release(inode, file);
2490 return ret; 2496 return ret;
2491} 2497}
2492 2498
@@ -4254,7 +4260,7 @@ static void css_free_rcu_fn(struct rcu_head *rcu_head)
4254 * css_put(). dput() requires process context which we don't have. 4260 * css_put(). dput() requires process context which we don't have.
4255 */ 4261 */
4256 INIT_WORK(&css->destroy_work, css_free_work_fn); 4262 INIT_WORK(&css->destroy_work, css_free_work_fn);
4257 schedule_work(&css->destroy_work); 4263 queue_work(cgroup_destroy_wq, &css->destroy_work);
4258} 4264}
4259 4265
4260static void css_release(struct percpu_ref *ref) 4266static void css_release(struct percpu_ref *ref)
@@ -4544,7 +4550,7 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
4544 container_of(ref, struct cgroup_subsys_state, refcnt); 4550 container_of(ref, struct cgroup_subsys_state, refcnt);
4545 4551
4546 INIT_WORK(&css->destroy_work, css_killed_work_fn); 4552 INIT_WORK(&css->destroy_work, css_killed_work_fn);
4547 schedule_work(&css->destroy_work); 4553 queue_work(cgroup_destroy_wq, &css->destroy_work);
4548} 4554}
4549 4555
4550/** 4556/**
@@ -5068,6 +5074,22 @@ out:
5068 return err; 5074 return err;
5069} 5075}
5070 5076
5077static int __init cgroup_wq_init(void)
5078{
5079 /*
5080 * There isn't much point in executing destruction path in
5081 * parallel. Good chunk is serialized with cgroup_mutex anyway.
5082 * Use 1 for @max_active.
5083 *
5084 * We would prefer to do this in cgroup_init() above, but that
5085 * is called before init_workqueues(): so leave this until after.
5086 */
5087 cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
5088 BUG_ON(!cgroup_destroy_wq);
5089 return 0;
5090}
5091core_initcall(cgroup_wq_init);
5092
5071/* 5093/*
5072 * proc_cgroup_show() 5094 * proc_cgroup_show()
5073 * - Print task's cgroup paths into seq_file, one line for each hierarchy 5095 * - Print task's cgroup paths into seq_file, one line for each hierarchy
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 6bf981e13c43..4772034b4b17 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1033,8 +1033,10 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
1033 need_loop = task_has_mempolicy(tsk) || 1033 need_loop = task_has_mempolicy(tsk) ||
1034 !nodes_intersects(*newmems, tsk->mems_allowed); 1034 !nodes_intersects(*newmems, tsk->mems_allowed);
1035 1035
1036 if (need_loop) 1036 if (need_loop) {
1037 local_irq_disable();
1037 write_seqcount_begin(&tsk->mems_allowed_seq); 1038 write_seqcount_begin(&tsk->mems_allowed_seq);
1039 }
1038 1040
1039 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); 1041 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
1040 mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1); 1042 mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
@@ -1042,8 +1044,10 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
1042 mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2); 1044 mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
1043 tsk->mems_allowed = *newmems; 1045 tsk->mems_allowed = *newmems;
1044 1046
1045 if (need_loop) 1047 if (need_loop) {
1046 write_seqcount_end(&tsk->mems_allowed_seq); 1048 write_seqcount_end(&tsk->mems_allowed_seq);
1049 local_irq_enable();
1050 }
1047 1051
1048 task_unlock(tsk); 1052 task_unlock(tsk);
1049} 1053}
diff --git a/kernel/extable.c b/kernel/extable.c
index 832cb28105bb..763faf037ec1 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -61,7 +61,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
61static inline int init_kernel_text(unsigned long addr) 61static inline int init_kernel_text(unsigned long addr)
62{ 62{
63 if (addr >= (unsigned long)_sinittext && 63 if (addr >= (unsigned long)_sinittext &&
64 addr <= (unsigned long)_einittext) 64 addr < (unsigned long)_einittext)
65 return 1; 65 return 1;
66 return 0; 66 return 0;
67} 67}
@@ -69,7 +69,7 @@ static inline int init_kernel_text(unsigned long addr)
69int core_kernel_text(unsigned long addr) 69int core_kernel_text(unsigned long addr)
70{ 70{
71 if (addr >= (unsigned long)_stext && 71 if (addr >= (unsigned long)_stext &&
72 addr <= (unsigned long)_etext) 72 addr < (unsigned long)_etext)
73 return 1; 73 return 1;
74 74
75 if (system_state == SYSTEM_BOOTING && 75 if (system_state == SYSTEM_BOOTING &&
diff --git a/kernel/futex.c b/kernel/futex.c
index 80ba086f021d..f6ff0191ecf7 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -251,6 +251,9 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
251 return -EINVAL; 251 return -EINVAL;
252 address -= key->both.offset; 252 address -= key->both.offset;
253 253
254 if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
255 return -EFAULT;
256
254 /* 257 /*
255 * PROCESS_PRIVATE futexes are fast. 258 * PROCESS_PRIVATE futexes are fast.
256 * As the mm cannot disappear under us and the 'key' only needs 259 * As the mm cannot disappear under us and the 'key' only needs
@@ -259,8 +262,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
259 * but access_ok() should be faster than find_vma() 262 * but access_ok() should be faster than find_vma()
260 */ 263 */
261 if (!fshared) { 264 if (!fshared) {
262 if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
263 return -EFAULT;
264 key->private.mm = mm; 265 key->private.mm = mm;
265 key->private.address = address; 266 key->private.address = address;
266 get_futex_key_refs(key); 267 get_futex_key_refs(key);
@@ -288,7 +289,7 @@ again:
288 put_page(page); 289 put_page(page);
289 /* serialize against __split_huge_page_splitting() */ 290 /* serialize against __split_huge_page_splitting() */
290 local_irq_disable(); 291 local_irq_disable();
291 if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) { 292 if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
292 page_head = compound_head(page); 293 page_head = compound_head(page);
293 /* 294 /*
294 * page_head is valid pointer but we must pin 295 * page_head is valid pointer but we must pin
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index cb228bf21760..abcd6ca86cb7 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -50,7 +50,7 @@ static void resume_irqs(bool want_early)
50 bool is_early = desc->action && 50 bool is_early = desc->action &&
51 desc->action->flags & IRQF_EARLY_RESUME; 51 desc->action->flags & IRQF_EARLY_RESUME;
52 52
53 if (is_early != want_early) 53 if (!is_early && want_early)
54 continue; 54 continue;
55 55
56 raw_spin_lock_irqsave(&desc->lock, flags); 56 raw_spin_lock_irqsave(&desc->lock, flags);
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index 1162f1030f18..3320b84cc60f 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -14,6 +14,7 @@ enum {
14 _IRQ_NO_BALANCING = IRQ_NO_BALANCING, 14 _IRQ_NO_BALANCING = IRQ_NO_BALANCING,
15 _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD, 15 _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
16 _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, 16 _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
17 _IRQ_IS_POLLED = IRQ_IS_POLLED,
17 _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, 18 _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
18}; 19};
19 20
@@ -26,6 +27,7 @@ enum {
26#define IRQ_NOAUTOEN GOT_YOU_MORON 27#define IRQ_NOAUTOEN GOT_YOU_MORON
27#define IRQ_NESTED_THREAD GOT_YOU_MORON 28#define IRQ_NESTED_THREAD GOT_YOU_MORON
28#define IRQ_PER_CPU_DEVID GOT_YOU_MORON 29#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
30#define IRQ_IS_POLLED GOT_YOU_MORON
29#undef IRQF_MODIFY_MASK 31#undef IRQF_MODIFY_MASK
30#define IRQF_MODIFY_MASK GOT_YOU_MORON 32#define IRQF_MODIFY_MASK GOT_YOU_MORON
31 33
@@ -147,3 +149,8 @@ static inline bool irq_settings_is_nested_thread(struct irq_desc *desc)
147{ 149{
148 return desc->status_use_accessors & _IRQ_NESTED_THREAD; 150 return desc->status_use_accessors & _IRQ_NESTED_THREAD;
149} 151}
152
153static inline bool irq_settings_is_polled(struct irq_desc *desc)
154{
155 return desc->status_use_accessors & _IRQ_IS_POLLED;
156}
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 7b5f012bde9d..a1d8cc63b56e 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -67,8 +67,13 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
67 67
68 raw_spin_lock(&desc->lock); 68 raw_spin_lock(&desc->lock);
69 69
70 /* PER_CPU and nested thread interrupts are never polled */ 70 /*
71 if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc)) 71 * PER_CPU, nested thread interrupts and interrupts explicitely
72 * marked polled are excluded from polling.
73 */
74 if (irq_settings_is_per_cpu(desc) ||
75 irq_settings_is_nested_thread(desc) ||
76 irq_settings_is_polled(desc))
72 goto out; 77 goto out;
73 78
74 /* 79 /*
@@ -268,7 +273,8 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
268void note_interrupt(unsigned int irq, struct irq_desc *desc, 273void note_interrupt(unsigned int irq, struct irq_desc *desc,
269 irqreturn_t action_ret) 274 irqreturn_t action_ret)
270{ 275{
271 if (desc->istate & IRQS_POLL_INPROGRESS) 276 if (desc->istate & IRQS_POLL_INPROGRESS ||
277 irq_settings_is_polled(desc))
272 return; 278 return;
273 279
274 /* we get here again via the threaded handler */ 280 /* we get here again via the threaded handler */
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 490afc03627e..d0d8fca54065 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -47,6 +47,9 @@ u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
47size_t vmcoreinfo_size; 47size_t vmcoreinfo_size;
48size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data); 48size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
49 49
50/* Flag to indicate we are going to kexec a new kernel */
51bool kexec_in_progress = false;
52
50/* Location of the reserved area for the crash kernel */ 53/* Location of the reserved area for the crash kernel */
51struct resource crashk_res = { 54struct resource crashk_res = {
52 .name = "Crash kernel", 55 .name = "Crash kernel",
@@ -1675,6 +1678,7 @@ int kernel_kexec(void)
1675 } else 1678 } else
1676#endif 1679#endif
1677 { 1680 {
1681 kexec_in_progress = true;
1678 kernel_restart_prepare(NULL); 1682 kernel_restart_prepare(NULL);
1679 printk(KERN_EMERG "Starting new kernel\n"); 1683 printk(KERN_EMERG "Starting new kernel\n");
1680 machine_shutdown(); 1684 machine_shutdown();
diff --git a/kernel/modsign_certificate.S b/kernel/modsign_certificate.S
deleted file mode 100644
index 4a9a86d12c8b..000000000000
--- a/kernel/modsign_certificate.S
+++ /dev/null
@@ -1,12 +0,0 @@
1#include <linux/export.h>
2
3#define GLOBAL(name) \
4 .globl VMLINUX_SYMBOL(name); \
5 VMLINUX_SYMBOL(name):
6
7 .section ".init.data","aw"
8
9GLOBAL(modsign_certificate_list)
10 .incbin "signing_key.x509"
11 .incbin "extra_certificates"
12GLOBAL(modsign_certificate_list_end)
diff --git a/kernel/modsign_pubkey.c b/kernel/modsign_pubkey.c
deleted file mode 100644
index 7cbd4507a7e6..000000000000
--- a/kernel/modsign_pubkey.c
+++ /dev/null
@@ -1,104 +0,0 @@
1/* Public keys for module signature verification
2 *
3 * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/cred.h>
15#include <linux/err.h>
16#include <keys/asymmetric-type.h>
17#include "module-internal.h"
18
19struct key *modsign_keyring;
20
21extern __initconst const u8 modsign_certificate_list[];
22extern __initconst const u8 modsign_certificate_list_end[];
23
24/*
25 * We need to make sure ccache doesn't cache the .o file as it doesn't notice
26 * if modsign.pub changes.
27 */
28static __initconst const char annoy_ccache[] = __TIME__ "foo";
29
30/*
31 * Load the compiled-in keys
32 */
33static __init int module_verify_init(void)
34{
35 pr_notice("Initialise module verification\n");
36
37 modsign_keyring = keyring_alloc(".module_sign",
38 KUIDT_INIT(0), KGIDT_INIT(0),
39 current_cred(),
40 ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
41 KEY_USR_VIEW | KEY_USR_READ),
42 KEY_ALLOC_NOT_IN_QUOTA, NULL);
43 if (IS_ERR(modsign_keyring))
44 panic("Can't allocate module signing keyring\n");
45
46 return 0;
47}
48
49/*
50 * Must be initialised before we try and load the keys into the keyring.
51 */
52device_initcall(module_verify_init);
53
54/*
55 * Load the compiled-in keys
56 */
57static __init int load_module_signing_keys(void)
58{
59 key_ref_t key;
60 const u8 *p, *end;
61 size_t plen;
62
63 pr_notice("Loading module verification certificates\n");
64
65 end = modsign_certificate_list_end;
66 p = modsign_certificate_list;
67 while (p < end) {
68 /* Each cert begins with an ASN.1 SEQUENCE tag and must be more
69 * than 256 bytes in size.
70 */
71 if (end - p < 4)
72 goto dodgy_cert;
73 if (p[0] != 0x30 &&
74 p[1] != 0x82)
75 goto dodgy_cert;
76 plen = (p[2] << 8) | p[3];
77 plen += 4;
78 if (plen > end - p)
79 goto dodgy_cert;
80
81 key = key_create_or_update(make_key_ref(modsign_keyring, 1),
82 "asymmetric",
83 NULL,
84 p,
85 plen,
86 (KEY_POS_ALL & ~KEY_POS_SETATTR) |
87 KEY_USR_VIEW,
88 KEY_ALLOC_NOT_IN_QUOTA);
89 if (IS_ERR(key))
90 pr_err("MODSIGN: Problem loading in-kernel X.509 certificate (%ld)\n",
91 PTR_ERR(key));
92 else
93 pr_notice("MODSIGN: Loaded cert '%s'\n",
94 key_ref_to_ptr(key)->description);
95 p += plen;
96 }
97
98 return 0;
99
100dodgy_cert:
101 pr_err("MODSIGN: Problem parsing in-kernel X.509 certificate list\n");
102 return 0;
103}
104late_initcall(load_module_signing_keys);
diff --git a/kernel/module-internal.h b/kernel/module-internal.h
index 24f9247b7d02..915e123a430f 100644
--- a/kernel/module-internal.h
+++ b/kernel/module-internal.h
@@ -9,6 +9,4 @@
9 * 2 of the Licence, or (at your option) any later version. 9 * 2 of the Licence, or (at your option) any later version.
10 */ 10 */
11 11
12extern struct key *modsign_keyring;
13
14extern int mod_verify_sig(const void *mod, unsigned long *_modlen); 12extern int mod_verify_sig(const void *mod, unsigned long *_modlen);
diff --git a/kernel/module_signing.c b/kernel/module_signing.c
index f2970bddc5ea..be5b8fac4bd0 100644
--- a/kernel/module_signing.c
+++ b/kernel/module_signing.c
@@ -14,6 +14,7 @@
14#include <crypto/public_key.h> 14#include <crypto/public_key.h>
15#include <crypto/hash.h> 15#include <crypto/hash.h>
16#include <keys/asymmetric-type.h> 16#include <keys/asymmetric-type.h>
17#include <keys/system_keyring.h>
17#include "module-internal.h" 18#include "module-internal.h"
18 19
19/* 20/*
@@ -28,7 +29,7 @@
28 */ 29 */
29struct module_signature { 30struct module_signature {
30 u8 algo; /* Public-key crypto algorithm [enum pkey_algo] */ 31 u8 algo; /* Public-key crypto algorithm [enum pkey_algo] */
31 u8 hash; /* Digest algorithm [enum pkey_hash_algo] */ 32 u8 hash; /* Digest algorithm [enum hash_algo] */
32 u8 id_type; /* Key identifier type [enum pkey_id_type] */ 33 u8 id_type; /* Key identifier type [enum pkey_id_type] */
33 u8 signer_len; /* Length of signer's name */ 34 u8 signer_len; /* Length of signer's name */
34 u8 key_id_len; /* Length of key identifier */ 35 u8 key_id_len; /* Length of key identifier */
@@ -39,7 +40,7 @@ struct module_signature {
39/* 40/*
40 * Digest the module contents. 41 * Digest the module contents.
41 */ 42 */
42static struct public_key_signature *mod_make_digest(enum pkey_hash_algo hash, 43static struct public_key_signature *mod_make_digest(enum hash_algo hash,
43 const void *mod, 44 const void *mod,
44 unsigned long modlen) 45 unsigned long modlen)
45{ 46{
@@ -54,7 +55,7 @@ static struct public_key_signature *mod_make_digest(enum pkey_hash_algo hash,
54 /* Allocate the hashing algorithm we're going to need and find out how 55 /* Allocate the hashing algorithm we're going to need and find out how
55 * big the hash operational data will be. 56 * big the hash operational data will be.
56 */ 57 */
57 tfm = crypto_alloc_shash(pkey_hash_algo[hash], 0, 0); 58 tfm = crypto_alloc_shash(hash_algo_name[hash], 0, 0);
58 if (IS_ERR(tfm)) 59 if (IS_ERR(tfm))
59 return (PTR_ERR(tfm) == -ENOENT) ? ERR_PTR(-ENOPKG) : ERR_CAST(tfm); 60 return (PTR_ERR(tfm) == -ENOENT) ? ERR_PTR(-ENOPKG) : ERR_CAST(tfm);
60 61
@@ -157,7 +158,7 @@ static struct key *request_asymmetric_key(const char *signer, size_t signer_len,
157 158
158 pr_debug("Look up: \"%s\"\n", id); 159 pr_debug("Look up: \"%s\"\n", id);
159 160
160 key = keyring_search(make_key_ref(modsign_keyring, 1), 161 key = keyring_search(make_key_ref(system_trusted_keyring, 1),
161 &key_type_asymmetric, id); 162 &key_type_asymmetric, id);
162 if (IS_ERR(key)) 163 if (IS_ERR(key))
163 pr_warn("Request for unknown module key '%s' err %ld\n", 164 pr_warn("Request for unknown module key '%s' err %ld\n",
@@ -217,7 +218,7 @@ int mod_verify_sig(const void *mod, unsigned long *_modlen)
217 return -ENOPKG; 218 return -ENOPKG;
218 219
219 if (ms.hash >= PKEY_HASH__LAST || 220 if (ms.hash >= PKEY_HASH__LAST ||
220 !pkey_hash_algo[ms.hash]) 221 !hash_algo_name[ms.hash])
221 return -ENOPKG; 222 return -ENOPKG;
222 223
223 key = request_asymmetric_key(sig, ms.signer_len, 224 key = request_asymmetric_key(sig, ms.signer_len,
diff --git a/kernel/padata.c b/kernel/padata.c
index 07af2c95dcfe..2abd25d79cc8 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -46,6 +46,7 @@ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
46 46
47static int padata_cpu_hash(struct parallel_data *pd) 47static int padata_cpu_hash(struct parallel_data *pd)
48{ 48{
49 unsigned int seq_nr;
49 int cpu_index; 50 int cpu_index;
50 51
51 /* 52 /*
@@ -53,10 +54,8 @@ static int padata_cpu_hash(struct parallel_data *pd)
53 * seq_nr mod. number of cpus in use. 54 * seq_nr mod. number of cpus in use.
54 */ 55 */
55 56
56 spin_lock(&pd->seq_lock); 57 seq_nr = atomic_inc_return(&pd->seq_nr);
57 cpu_index = pd->seq_nr % cpumask_weight(pd->cpumask.pcpu); 58 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
58 pd->seq_nr++;
59 spin_unlock(&pd->seq_lock);
60 59
61 return padata_index_to_cpu(pd, cpu_index); 60 return padata_index_to_cpu(pd, cpu_index);
62} 61}
@@ -429,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
429 padata_init_pqueues(pd); 428 padata_init_pqueues(pd);
430 padata_init_squeues(pd); 429 padata_init_squeues(pd);
431 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); 430 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
432 pd->seq_nr = 0; 431 atomic_set(&pd->seq_nr, -1);
433 atomic_set(&pd->reorder_objects, 0); 432 atomic_set(&pd->reorder_objects, 0);
434 atomic_set(&pd->refcnt, 0); 433 atomic_set(&pd->refcnt, 0);
435 pd->pinst = pinst; 434 pd->pinst = pinst;
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 10c22cae83a0..b38109e204af 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -792,7 +792,8 @@ void free_basic_memory_bitmaps(void)
792{ 792{
793 struct memory_bitmap *bm1, *bm2; 793 struct memory_bitmap *bm1, *bm2;
794 794
795 BUG_ON(!(forbidden_pages_map && free_pages_map)); 795 if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
796 return;
796 797
797 bm1 = forbidden_pages_map; 798 bm1 = forbidden_pages_map;
798 bm2 = free_pages_map; 799 bm2 = free_pages_map;
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 24850270c802..98d357584cd6 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -70,6 +70,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
70 data->swap = swsusp_resume_device ? 70 data->swap = swsusp_resume_device ?
71 swap_type_of(swsusp_resume_device, 0, NULL) : -1; 71 swap_type_of(swsusp_resume_device, 0, NULL) : -1;
72 data->mode = O_RDONLY; 72 data->mode = O_RDONLY;
73 data->free_bitmaps = false;
73 error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE); 74 error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
74 if (error) 75 if (error)
75 pm_notifier_call_chain(PM_POST_HIBERNATION); 76 pm_notifier_call_chain(PM_POST_HIBERNATION);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 6abb03dff5c0..08a765232432 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1632,7 +1632,7 @@ module_param(rcu_idle_gp_delay, int, 0644);
1632static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY; 1632static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
1633module_param(rcu_idle_lazy_gp_delay, int, 0644); 1633module_param(rcu_idle_lazy_gp_delay, int, 0644);
1634 1634
1635extern int tick_nohz_enabled; 1635extern int tick_nohz_active;
1636 1636
1637/* 1637/*
1638 * Try to advance callbacks for all flavors of RCU on the current CPU, but 1638 * Try to advance callbacks for all flavors of RCU on the current CPU, but
@@ -1729,7 +1729,7 @@ static void rcu_prepare_for_idle(int cpu)
1729 int tne; 1729 int tne;
1730 1730
1731 /* Handle nohz enablement switches conservatively. */ 1731 /* Handle nohz enablement switches conservatively. */
1732 tne = ACCESS_ONCE(tick_nohz_enabled); 1732 tne = ACCESS_ONCE(tick_nohz_active);
1733 if (tne != rdtp->tick_nohz_enabled_snap) { 1733 if (tne != rdtp->tick_nohz_enabled_snap) {
1734 if (rcu_cpu_has_callbacks(cpu, NULL)) 1734 if (rcu_cpu_has_callbacks(cpu, NULL))
1735 invoke_rcu_core(); /* force nohz to see update. */ 1735 invoke_rcu_core(); /* force nohz to see update. */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c1808606ee5f..e85cda20ab2b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2660,6 +2660,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
2660 } while (need_resched()); 2660 } while (need_resched());
2661} 2661}
2662EXPORT_SYMBOL(preempt_schedule); 2662EXPORT_SYMBOL(preempt_schedule);
2663#endif /* CONFIG_PREEMPT */
2663 2664
2664/* 2665/*
2665 * this is the entry point to schedule() from kernel preemption 2666 * this is the entry point to schedule() from kernel preemption
@@ -2693,8 +2694,6 @@ asmlinkage void __sched preempt_schedule_irq(void)
2693 exception_exit(prev_state); 2694 exception_exit(prev_state);
2694} 2695}
2695 2696
2696#endif /* CONFIG_PREEMPT */
2697
2698int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, 2697int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
2699 void *key) 2698 void *key)
2700{ 2699{
@@ -4762,7 +4761,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
4762 cpumask_clear_cpu(rq->cpu, old_rd->span); 4761 cpumask_clear_cpu(rq->cpu, old_rd->span);
4763 4762
4764 /* 4763 /*
4765 * If we dont want to free the old_rt yet then 4764 * If we dont want to free the old_rd yet then
4766 * set old_rd to NULL to skip the freeing later 4765 * set old_rd to NULL to skip the freeing later
4767 * in this function: 4766 * in this function:
4768 */ 4767 */
@@ -4910,8 +4909,9 @@ static void update_top_cache_domain(int cpu)
4910 if (sd) { 4909 if (sd) {
4911 id = cpumask_first(sched_domain_span(sd)); 4910 id = cpumask_first(sched_domain_span(sd));
4912 size = cpumask_weight(sched_domain_span(sd)); 4911 size = cpumask_weight(sched_domain_span(sd));
4913 rcu_assign_pointer(per_cpu(sd_busy, cpu), sd->parent); 4912 sd = sd->parent; /* sd_busy */
4914 } 4913 }
4914 rcu_assign_pointer(per_cpu(sd_busy, cpu), sd);
4915 4915
4916 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); 4916 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
4917 per_cpu(sd_llc_size, cpu) = size; 4917 per_cpu(sd_llc_size, cpu) = size;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e8b652ebe027..fd773ade1a31 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5379,10 +5379,31 @@ void update_group_power(struct sched_domain *sd, int cpu)
5379 */ 5379 */
5380 5380
5381 for_each_cpu(cpu, sched_group_cpus(sdg)) { 5381 for_each_cpu(cpu, sched_group_cpus(sdg)) {
5382 struct sched_group *sg = cpu_rq(cpu)->sd->groups; 5382 struct sched_group_power *sgp;
5383 struct rq *rq = cpu_rq(cpu);
5383 5384
5384 power_orig += sg->sgp->power_orig; 5385 /*
5385 power += sg->sgp->power; 5386 * build_sched_domains() -> init_sched_groups_power()
5387 * gets here before we've attached the domains to the
5388 * runqueues.
5389 *
5390 * Use power_of(), which is set irrespective of domains
5391 * in update_cpu_power().
5392 *
5393 * This avoids power/power_orig from being 0 and
5394 * causing divide-by-zero issues on boot.
5395 *
5396 * Runtime updates will correct power_orig.
5397 */
5398 if (unlikely(!rq->sd)) {
5399 power_orig += power_of(cpu);
5400 power += power_of(cpu);
5401 continue;
5402 }
5403
5404 sgp = rq->sd->groups->sgp;
5405 power_orig += sgp->power_orig;
5406 power += sgp->power;
5386 } 5407 }
5387 } else { 5408 } else {
5388 /* 5409 /*
diff --git a/kernel/system_certificates.S b/kernel/system_certificates.S
new file mode 100644
index 000000000000..3e9868d47535
--- /dev/null
+++ b/kernel/system_certificates.S
@@ -0,0 +1,20 @@
1#include <linux/export.h>
2#include <linux/init.h>
3
4 __INITRODATA
5
6 .align 8
7 .globl VMLINUX_SYMBOL(system_certificate_list)
8VMLINUX_SYMBOL(system_certificate_list):
9__cert_list_start:
10 .incbin "kernel/x509_certificate_list"
11__cert_list_end:
12
13 .align 8
14 .globl VMLINUX_SYMBOL(system_certificate_list_size)
15VMLINUX_SYMBOL(system_certificate_list_size):
16#ifdef CONFIG_64BIT
17 .quad __cert_list_end - __cert_list_start
18#else
19 .long __cert_list_end - __cert_list_start
20#endif
diff --git a/kernel/system_keyring.c b/kernel/system_keyring.c
new file mode 100644
index 000000000000..52ebc70263f4
--- /dev/null
+++ b/kernel/system_keyring.c
@@ -0,0 +1,105 @@
1/* System trusted keyring for trusted public keys
2 *
3 * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/export.h>
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/cred.h>
16#include <linux/err.h>
17#include <keys/asymmetric-type.h>
18#include <keys/system_keyring.h>
19#include "module-internal.h"
20
21struct key *system_trusted_keyring;
22EXPORT_SYMBOL_GPL(system_trusted_keyring);
23
24extern __initconst const u8 system_certificate_list[];
25extern __initconst const unsigned long system_certificate_list_size;
26
27/*
28 * Load the compiled-in keys
29 */
30static __init int system_trusted_keyring_init(void)
31{
32 pr_notice("Initialise system trusted keyring\n");
33
34 system_trusted_keyring =
35 keyring_alloc(".system_keyring",
36 KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
37 ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
38 KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH),
39 KEY_ALLOC_NOT_IN_QUOTA, NULL);
40 if (IS_ERR(system_trusted_keyring))
41 panic("Can't allocate system trusted keyring\n");
42
43 set_bit(KEY_FLAG_TRUSTED_ONLY, &system_trusted_keyring->flags);
44 return 0;
45}
46
47/*
48 * Must be initialised before we try and load the keys into the keyring.
49 */
50device_initcall(system_trusted_keyring_init);
51
52/*
53 * Load the compiled-in list of X.509 certificates.
54 */
55static __init int load_system_certificate_list(void)
56{
57 key_ref_t key;
58 const u8 *p, *end;
59 size_t plen;
60
61 pr_notice("Loading compiled-in X.509 certificates\n");
62
63 p = system_certificate_list;
64 end = p + system_certificate_list_size;
65 while (p < end) {
66 /* Each cert begins with an ASN.1 SEQUENCE tag and must be more
67 * than 256 bytes in size.
68 */
69 if (end - p < 4)
70 goto dodgy_cert;
71 if (p[0] != 0x30 &&
72 p[1] != 0x82)
73 goto dodgy_cert;
74 plen = (p[2] << 8) | p[3];
75 plen += 4;
76 if (plen > end - p)
77 goto dodgy_cert;
78
79 key = key_create_or_update(make_key_ref(system_trusted_keyring, 1),
80 "asymmetric",
81 NULL,
82 p,
83 plen,
84 ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
85 KEY_USR_VIEW | KEY_USR_READ),
86 KEY_ALLOC_NOT_IN_QUOTA |
87 KEY_ALLOC_TRUSTED);
88 if (IS_ERR(key)) {
89 pr_err("Problem loading in-kernel X.509 certificate (%ld)\n",
90 PTR_ERR(key));
91 } else {
92 pr_notice("Loaded X.509 cert '%s'\n",
93 key_ref_to_ptr(key)->description);
94 key_ref_put(key);
95 }
96 p += plen;
97 }
98
99 return 0;
100
101dodgy_cert:
102 pr_err("Problem parsing in-kernel X.509 certificate list\n");
103 return 0;
104}
105late_initcall(load_system_certificate_list);
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 9f4618eb51c8..13d2f7cd65db 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -673,17 +673,18 @@ err:
673 nlmsg_free(rep_skb); 673 nlmsg_free(rep_skb);
674} 674}
675 675
676static struct genl_ops taskstats_ops = { 676static const struct genl_ops taskstats_ops[] = {
677 .cmd = TASKSTATS_CMD_GET, 677 {
678 .doit = taskstats_user_cmd, 678 .cmd = TASKSTATS_CMD_GET,
679 .policy = taskstats_cmd_get_policy, 679 .doit = taskstats_user_cmd,
680 .flags = GENL_ADMIN_PERM, 680 .policy = taskstats_cmd_get_policy,
681}; 681 .flags = GENL_ADMIN_PERM,
682 682 },
683static struct genl_ops cgroupstats_ops = { 683 {
684 .cmd = CGROUPSTATS_CMD_GET, 684 .cmd = CGROUPSTATS_CMD_GET,
685 .doit = cgroupstats_user_cmd, 685 .doit = cgroupstats_user_cmd,
686 .policy = cgroupstats_cmd_get_policy, 686 .policy = cgroupstats_cmd_get_policy,
687 },
687}; 688};
688 689
689/* Needed early in initialization */ 690/* Needed early in initialization */
@@ -702,26 +703,13 @@ static int __init taskstats_init(void)
702{ 703{
703 int rc; 704 int rc;
704 705
705 rc = genl_register_family(&family); 706 rc = genl_register_family_with_ops(&family, taskstats_ops);
706 if (rc) 707 if (rc)
707 return rc; 708 return rc;
708 709
709 rc = genl_register_ops(&family, &taskstats_ops);
710 if (rc < 0)
711 goto err;
712
713 rc = genl_register_ops(&family, &cgroupstats_ops);
714 if (rc < 0)
715 goto err_cgroup_ops;
716
717 family_registered = 1; 710 family_registered = 1;
718 pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION); 711 pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
719 return 0; 712 return 0;
720err_cgroup_ops:
721 genl_unregister_ops(&family, &taskstats_ops);
722err:
723 genl_unregister_family(&family);
724 return rc;
725} 713}
726 714
727/* 715/*
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 64522ecdfe0e..162b03ab0ad2 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -33,6 +33,21 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
33 */ 33 */
34ktime_t tick_next_period; 34ktime_t tick_next_period;
35ktime_t tick_period; 35ktime_t tick_period;
36
37/*
38 * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
39 * which is responsible for calling do_timer(), i.e. the timekeeping stuff. This
40 * variable has two functions:
41 *
42 * 1) Prevent a thundering herd issue of a gazillion of CPUs trying to grab the
43 * timekeeping lock all at once. Only the CPU which is assigned to do the
44 * update is handling it.
45 *
46 * 2) Hand off the duty in the NOHZ idle case by setting the value to
47 * TICK_DO_TIMER_NONE, i.e. a non existing CPU. So the next cpu which looks
48 * at it will take over and keep the time keeping alive. The handover
49 * procedure also covers cpu hotplug.
50 */
36int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT; 51int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
37 52
38/* 53/*
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3612fc77f834..ea20f7d1ac2c 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -361,8 +361,8 @@ void __init tick_nohz_init(void)
361/* 361/*
362 * NO HZ enabled ? 362 * NO HZ enabled ?
363 */ 363 */
364int tick_nohz_enabled __read_mostly = 1; 364static int tick_nohz_enabled __read_mostly = 1;
365 365int tick_nohz_active __read_mostly;
366/* 366/*
367 * Enable / Disable tickless mode 367 * Enable / Disable tickless mode
368 */ 368 */
@@ -465,7 +465,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
465 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); 465 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
466 ktime_t now, idle; 466 ktime_t now, idle;
467 467
468 if (!tick_nohz_enabled) 468 if (!tick_nohz_active)
469 return -1; 469 return -1;
470 470
471 now = ktime_get(); 471 now = ktime_get();
@@ -506,7 +506,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
506 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); 506 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
507 ktime_t now, iowait; 507 ktime_t now, iowait;
508 508
509 if (!tick_nohz_enabled) 509 if (!tick_nohz_active)
510 return -1; 510 return -1;
511 511
512 now = ktime_get(); 512 now = ktime_get();
@@ -711,8 +711,10 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
711 return false; 711 return false;
712 } 712 }
713 713
714 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) 714 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
715 ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ };
715 return false; 716 return false;
717 }
716 718
717 if (need_resched()) 719 if (need_resched())
718 return false; 720 return false;
@@ -799,11 +801,6 @@ void tick_nohz_idle_enter(void)
799 local_irq_disable(); 801 local_irq_disable();
800 802
801 ts = &__get_cpu_var(tick_cpu_sched); 803 ts = &__get_cpu_var(tick_cpu_sched);
802 /*
803 * set ts->inidle unconditionally. even if the system did not
804 * switch to nohz mode the cpu frequency governers rely on the
805 * update of the idle time accounting in tick_nohz_start_idle().
806 */
807 ts->inidle = 1; 804 ts->inidle = 1;
808 __tick_nohz_idle_enter(ts); 805 __tick_nohz_idle_enter(ts);
809 806
@@ -973,7 +970,7 @@ static void tick_nohz_switch_to_nohz(void)
973 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); 970 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
974 ktime_t next; 971 ktime_t next;
975 972
976 if (!tick_nohz_enabled) 973 if (!tick_nohz_active)
977 return; 974 return;
978 975
979 local_irq_disable(); 976 local_irq_disable();
@@ -981,7 +978,7 @@ static void tick_nohz_switch_to_nohz(void)
981 local_irq_enable(); 978 local_irq_enable();
982 return; 979 return;
983 } 980 }
984 981 tick_nohz_active = 1;
985 ts->nohz_mode = NOHZ_MODE_LOWRES; 982 ts->nohz_mode = NOHZ_MODE_LOWRES;
986 983
987 /* 984 /*
@@ -1139,8 +1136,10 @@ void tick_setup_sched_timer(void)
1139 } 1136 }
1140 1137
1141#ifdef CONFIG_NO_HZ_COMMON 1138#ifdef CONFIG_NO_HZ_COMMON
1142 if (tick_nohz_enabled) 1139 if (tick_nohz_enabled) {
1143 ts->nohz_mode = NOHZ_MODE_HIGHRES; 1140 ts->nohz_mode = NOHZ_MODE_HIGHRES;
1141 tick_nohz_active = 1;
1142 }
1144#endif 1143#endif
1145} 1144}
1146#endif /* HIGH_RES_TIMERS */ 1145#endif /* HIGH_RES_TIMERS */
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 3abf53418b67..87b4f00284c9 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1347,7 +1347,7 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
1347 tk->xtime_nsec -= remainder; 1347 tk->xtime_nsec -= remainder;
1348 tk->xtime_nsec += 1ULL << tk->shift; 1348 tk->xtime_nsec += 1ULL << tk->shift;
1349 tk->ntp_error += remainder << tk->ntp_error_shift; 1349 tk->ntp_error += remainder << tk->ntp_error_shift;
1350 1350 tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;
1351} 1351}
1352#else 1352#else
1353#define old_vsyscall_fixup(tk) 1353#define old_vsyscall_fixup(tk)
diff --git a/kernel/timer.c b/kernel/timer.c
index 6582b82fa966..accfd241b9e5 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1518,9 +1518,8 @@ static int init_timers_cpu(int cpu)
1518 /* 1518 /*
1519 * The APs use this path later in boot 1519 * The APs use this path later in boot
1520 */ 1520 */
1521 base = kmalloc_node(sizeof(*base), 1521 base = kzalloc_node(sizeof(*base), GFP_KERNEL,
1522 GFP_KERNEL | __GFP_ZERO, 1522 cpu_to_node(cpu));
1523 cpu_to_node(cpu));
1524 if (!base) 1523 if (!base)
1525 return -ENOMEM; 1524 return -ENOMEM;
1526 1525
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 22fa55696760..0e9f9eaade2f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -367,9 +367,6 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
367 367
368static int __register_ftrace_function(struct ftrace_ops *ops) 368static int __register_ftrace_function(struct ftrace_ops *ops)
369{ 369{
370 if (unlikely(ftrace_disabled))
371 return -ENODEV;
372
373 if (FTRACE_WARN_ON(ops == &global_ops)) 370 if (FTRACE_WARN_ON(ops == &global_ops))
374 return -EINVAL; 371 return -EINVAL;
375 372
@@ -428,9 +425,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
428{ 425{
429 int ret; 426 int ret;
430 427
431 if (ftrace_disabled)
432 return -ENODEV;
433
434 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED))) 428 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
435 return -EBUSY; 429 return -EBUSY;
436 430
@@ -2088,10 +2082,15 @@ static void ftrace_startup_enable(int command)
2088static int ftrace_startup(struct ftrace_ops *ops, int command) 2082static int ftrace_startup(struct ftrace_ops *ops, int command)
2089{ 2083{
2090 bool hash_enable = true; 2084 bool hash_enable = true;
2085 int ret;
2091 2086
2092 if (unlikely(ftrace_disabled)) 2087 if (unlikely(ftrace_disabled))
2093 return -ENODEV; 2088 return -ENODEV;
2094 2089
2090 ret = __register_ftrace_function(ops);
2091 if (ret)
2092 return ret;
2093
2095 ftrace_start_up++; 2094 ftrace_start_up++;
2096 command |= FTRACE_UPDATE_CALLS; 2095 command |= FTRACE_UPDATE_CALLS;
2097 2096
@@ -2113,12 +2112,17 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
2113 return 0; 2112 return 0;
2114} 2113}
2115 2114
2116static void ftrace_shutdown(struct ftrace_ops *ops, int command) 2115static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2117{ 2116{
2118 bool hash_disable = true; 2117 bool hash_disable = true;
2118 int ret;
2119 2119
2120 if (unlikely(ftrace_disabled)) 2120 if (unlikely(ftrace_disabled))
2121 return; 2121 return -ENODEV;
2122
2123 ret = __unregister_ftrace_function(ops);
2124 if (ret)
2125 return ret;
2122 2126
2123 ftrace_start_up--; 2127 ftrace_start_up--;
2124 /* 2128 /*
@@ -2153,9 +2157,10 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
2153 } 2157 }
2154 2158
2155 if (!command || !ftrace_enabled) 2159 if (!command || !ftrace_enabled)
2156 return; 2160 return 0;
2157 2161
2158 ftrace_run_update_code(command); 2162 ftrace_run_update_code(command);
2163 return 0;
2159} 2164}
2160 2165
2161static void ftrace_startup_sysctl(void) 2166static void ftrace_startup_sysctl(void)
@@ -3060,16 +3065,13 @@ static void __enable_ftrace_function_probe(void)
3060 if (i == FTRACE_FUNC_HASHSIZE) 3065 if (i == FTRACE_FUNC_HASHSIZE)
3061 return; 3066 return;
3062 3067
3063 ret = __register_ftrace_function(&trace_probe_ops); 3068 ret = ftrace_startup(&trace_probe_ops, 0);
3064 if (!ret)
3065 ret = ftrace_startup(&trace_probe_ops, 0);
3066 3069
3067 ftrace_probe_registered = 1; 3070 ftrace_probe_registered = 1;
3068} 3071}
3069 3072
3070static void __disable_ftrace_function_probe(void) 3073static void __disable_ftrace_function_probe(void)
3071{ 3074{
3072 int ret;
3073 int i; 3075 int i;
3074 3076
3075 if (!ftrace_probe_registered) 3077 if (!ftrace_probe_registered)
@@ -3082,9 +3084,7 @@ static void __disable_ftrace_function_probe(void)
3082 } 3084 }
3083 3085
3084 /* no more funcs left */ 3086 /* no more funcs left */
3085 ret = __unregister_ftrace_function(&trace_probe_ops); 3087 ftrace_shutdown(&trace_probe_ops, 0);
3086 if (!ret)
3087 ftrace_shutdown(&trace_probe_ops, 0);
3088 3088
3089 ftrace_probe_registered = 0; 3089 ftrace_probe_registered = 0;
3090} 3090}
@@ -4366,12 +4366,15 @@ core_initcall(ftrace_nodyn_init);
4366static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } 4366static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4367static inline void ftrace_startup_enable(int command) { } 4367static inline void ftrace_startup_enable(int command) { }
4368/* Keep as macros so we do not need to define the commands */ 4368/* Keep as macros so we do not need to define the commands */
4369# define ftrace_startup(ops, command) \ 4369# define ftrace_startup(ops, command) \
4370 ({ \ 4370 ({ \
4371 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \ 4371 int ___ret = __register_ftrace_function(ops); \
4372 0; \ 4372 if (!___ret) \
4373 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
4374 ___ret; \
4373 }) 4375 })
4374# define ftrace_shutdown(ops, command) do { } while (0) 4376# define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
4377
4375# define ftrace_startup_sysctl() do { } while (0) 4378# define ftrace_startup_sysctl() do { } while (0)
4376# define ftrace_shutdown_sysctl() do { } while (0) 4379# define ftrace_shutdown_sysctl() do { } while (0)
4377 4380
@@ -4780,9 +4783,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
4780 4783
4781 mutex_lock(&ftrace_lock); 4784 mutex_lock(&ftrace_lock);
4782 4785
4783 ret = __register_ftrace_function(ops); 4786 ret = ftrace_startup(ops, 0);
4784 if (!ret)
4785 ret = ftrace_startup(ops, 0);
4786 4787
4787 mutex_unlock(&ftrace_lock); 4788 mutex_unlock(&ftrace_lock);
4788 4789
@@ -4801,9 +4802,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
4801 int ret; 4802 int ret;
4802 4803
4803 mutex_lock(&ftrace_lock); 4804 mutex_lock(&ftrace_lock);
4804 ret = __unregister_ftrace_function(ops); 4805 ret = ftrace_shutdown(ops, 0);
4805 if (!ret)
4806 ftrace_shutdown(ops, 0);
4807 mutex_unlock(&ftrace_lock); 4806 mutex_unlock(&ftrace_lock);
4808 4807
4809 return ret; 4808 return ret;
@@ -4997,6 +4996,13 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4997 return NOTIFY_DONE; 4996 return NOTIFY_DONE;
4998} 4997}
4999 4998
4999/* Just a place holder for function graph */
5000static struct ftrace_ops fgraph_ops __read_mostly = {
5001 .func = ftrace_stub,
5002 .flags = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
5003 FTRACE_OPS_FL_RECURSION_SAFE,
5004};
5005
5000int register_ftrace_graph(trace_func_graph_ret_t retfunc, 5006int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5001 trace_func_graph_ent_t entryfunc) 5007 trace_func_graph_ent_t entryfunc)
5002{ 5008{
@@ -5023,7 +5029,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5023 ftrace_graph_return = retfunc; 5029 ftrace_graph_return = retfunc;
5024 ftrace_graph_entry = entryfunc; 5030 ftrace_graph_entry = entryfunc;
5025 5031
5026 ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET); 5032 ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
5027 5033
5028out: 5034out:
5029 mutex_unlock(&ftrace_lock); 5035 mutex_unlock(&ftrace_lock);
@@ -5040,7 +5046,7 @@ void unregister_ftrace_graph(void)
5040 ftrace_graph_active--; 5046 ftrace_graph_active--;
5041 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; 5047 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
5042 ftrace_graph_entry = ftrace_graph_entry_stub; 5048 ftrace_graph_entry = ftrace_graph_entry_stub;
5043 ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET); 5049 ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
5044 unregister_pm_notifier(&ftrace_suspend_notifier); 5050 unregister_pm_notifier(&ftrace_suspend_notifier);
5045 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); 5051 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5046 5052
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f919a2e21bf3..a11800ae96de 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2314,6 +2314,9 @@ int event_trace_del_tracer(struct trace_array *tr)
2314 /* Disable any running events */ 2314 /* Disable any running events */
2315 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0); 2315 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
2316 2316
2317 /* Access to events are within rcu_read_lock_sched() */
2318 synchronize_sched();
2319
2317 down_write(&trace_event_sem); 2320 down_write(&trace_event_sem);
2318 __trace_remove_event_dirs(tr); 2321 __trace_remove_event_dirs(tr);
2319 debugfs_remove_recursive(tr->event_dir); 2322 debugfs_remove_recursive(tr->event_dir);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index e4b6d11bdf78..ea90eb5f6f17 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -431,11 +431,6 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
431 if (!tr->sys_refcount_enter) 431 if (!tr->sys_refcount_enter)
432 unregister_trace_sys_enter(ftrace_syscall_enter, tr); 432 unregister_trace_sys_enter(ftrace_syscall_enter, tr);
433 mutex_unlock(&syscall_trace_lock); 433 mutex_unlock(&syscall_trace_lock);
434 /*
435 * Callers expect the event to be completely disabled on
436 * return, so wait for current handlers to finish.
437 */
438 synchronize_sched();
439} 434}
440 435
441static int reg_event_syscall_exit(struct ftrace_event_file *file, 436static int reg_event_syscall_exit(struct ftrace_event_file *file,
@@ -474,11 +469,6 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
474 if (!tr->sys_refcount_exit) 469 if (!tr->sys_refcount_exit)
475 unregister_trace_sys_exit(ftrace_syscall_exit, tr); 470 unregister_trace_sys_exit(ftrace_syscall_exit, tr);
476 mutex_unlock(&syscall_trace_lock); 471 mutex_unlock(&syscall_trace_lock);
477 /*
478 * Callers expect the event to be completely disabled on
479 * return, so wait for current handlers to finish.
480 */
481 synchronize_sched();
482} 472}
483 473
484static int __init init_syscall_trace(struct ftrace_event_call *call) 474static int __init init_syscall_trace(struct ftrace_event_call *call)
diff --git a/kernel/user.c b/kernel/user.c
index 5bbb91988e69..a3a0dbfda329 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -51,6 +51,10 @@ struct user_namespace init_user_ns = {
51 .owner = GLOBAL_ROOT_UID, 51 .owner = GLOBAL_ROOT_UID,
52 .group = GLOBAL_ROOT_GID, 52 .group = GLOBAL_ROOT_GID,
53 .proc_inum = PROC_USER_INIT_INO, 53 .proc_inum = PROC_USER_INIT_INO,
54#ifdef CONFIG_KEYS_KERBEROS_CACHE
55 .krb_cache_register_sem =
56 __RWSEM_INITIALIZER(init_user_ns.krb_cache_register_sem),
57#endif
54}; 58};
55EXPORT_SYMBOL_GPL(init_user_ns); 59EXPORT_SYMBOL_GPL(init_user_ns);
56 60
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 13fb1134ba58..240fb62cf394 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -101,6 +101,9 @@ int create_user_ns(struct cred *new)
101 101
102 set_cred_user_ns(new, ns); 102 set_cred_user_ns(new, ns);
103 103
104#ifdef CONFIG_PERSISTENT_KEYRINGS
105 init_rwsem(&ns->persistent_keyring_register_sem);
106#endif
104 return 0; 107 return 0;
105} 108}
106 109
@@ -130,6 +133,9 @@ void free_user_ns(struct user_namespace *ns)
130 133
131 do { 134 do {
132 parent = ns->parent; 135 parent = ns->parent;
136#ifdef CONFIG_PERSISTENT_KEYRINGS
137 key_put(ns->persistent_keyring_register);
138#endif
133 proc_free_inum(ns->proc_inum); 139 proc_free_inum(ns->proc_inum);
134 kmem_cache_free(user_ns_cachep, ns); 140 kmem_cache_free(user_ns_cachep, ns);
135 ns = parent; 141 ns = parent;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 987293d03ebc..b010eac595d2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -305,6 +305,9 @@ static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
305/* I: attributes used when instantiating standard unbound pools on demand */ 305/* I: attributes used when instantiating standard unbound pools on demand */
306static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS]; 306static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
307 307
308/* I: attributes used when instantiating ordered pools on demand */
309static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
310
308struct workqueue_struct *system_wq __read_mostly; 311struct workqueue_struct *system_wq __read_mostly;
309EXPORT_SYMBOL(system_wq); 312EXPORT_SYMBOL(system_wq);
310struct workqueue_struct *system_highpri_wq __read_mostly; 313struct workqueue_struct *system_highpri_wq __read_mostly;
@@ -518,14 +521,21 @@ static inline void debug_work_activate(struct work_struct *work) { }
518static inline void debug_work_deactivate(struct work_struct *work) { } 521static inline void debug_work_deactivate(struct work_struct *work) { }
519#endif 522#endif
520 523
521/* allocate ID and assign it to @pool */ 524/**
525 * worker_pool_assign_id - allocate ID and assing it to @pool
526 * @pool: the pool pointer of interest
527 *
528 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
529 * successfully, -errno on failure.
530 */
522static int worker_pool_assign_id(struct worker_pool *pool) 531static int worker_pool_assign_id(struct worker_pool *pool)
523{ 532{
524 int ret; 533 int ret;
525 534
526 lockdep_assert_held(&wq_pool_mutex); 535 lockdep_assert_held(&wq_pool_mutex);
527 536
528 ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL); 537 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
538 GFP_KERNEL);
529 if (ret >= 0) { 539 if (ret >= 0) {
530 pool->id = ret; 540 pool->id = ret;
531 return 0; 541 return 0;
@@ -1320,7 +1330,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
1320 1330
1321 debug_work_activate(work); 1331 debug_work_activate(work);
1322 1332
1323 /* if dying, only works from the same workqueue are allowed */ 1333 /* if draining, only works from the same workqueue are allowed */
1324 if (unlikely(wq->flags & __WQ_DRAINING) && 1334 if (unlikely(wq->flags & __WQ_DRAINING) &&
1325 WARN_ON_ONCE(!is_chained_work(wq))) 1335 WARN_ON_ONCE(!is_chained_work(wq)))
1326 return; 1336 return;
@@ -1736,16 +1746,17 @@ static struct worker *create_worker(struct worker_pool *pool)
1736 if (IS_ERR(worker->task)) 1746 if (IS_ERR(worker->task))
1737 goto fail; 1747 goto fail;
1738 1748
1749 set_user_nice(worker->task, pool->attrs->nice);
1750
1751 /* prevent userland from meddling with cpumask of workqueue workers */
1752 worker->task->flags |= PF_NO_SETAFFINITY;
1753
1739 /* 1754 /*
1740 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any 1755 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
1741 * online CPUs. It'll be re-applied when any of the CPUs come up. 1756 * online CPUs. It'll be re-applied when any of the CPUs come up.
1742 */ 1757 */
1743 set_user_nice(worker->task, pool->attrs->nice);
1744 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); 1758 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1745 1759
1746 /* prevent userland from meddling with cpumask of workqueue workers */
1747 worker->task->flags |= PF_NO_SETAFFINITY;
1748
1749 /* 1760 /*
1750 * The caller is responsible for ensuring %POOL_DISASSOCIATED 1761 * The caller is responsible for ensuring %POOL_DISASSOCIATED
1751 * remains stable across this function. See the comments above the 1762 * remains stable across this function. See the comments above the
@@ -2840,19 +2851,6 @@ already_gone:
2840 return false; 2851 return false;
2841} 2852}
2842 2853
2843static bool __flush_work(struct work_struct *work)
2844{
2845 struct wq_barrier barr;
2846
2847 if (start_flush_work(work, &barr)) {
2848 wait_for_completion(&barr.done);
2849 destroy_work_on_stack(&barr.work);
2850 return true;
2851 } else {
2852 return false;
2853 }
2854}
2855
2856/** 2854/**
2857 * flush_work - wait for a work to finish executing the last queueing instance 2855 * flush_work - wait for a work to finish executing the last queueing instance
2858 * @work: the work to flush 2856 * @work: the work to flush
@@ -2866,10 +2864,18 @@ static bool __flush_work(struct work_struct *work)
2866 */ 2864 */
2867bool flush_work(struct work_struct *work) 2865bool flush_work(struct work_struct *work)
2868{ 2866{
2867 struct wq_barrier barr;
2868
2869 lock_map_acquire(&work->lockdep_map); 2869 lock_map_acquire(&work->lockdep_map);
2870 lock_map_release(&work->lockdep_map); 2870 lock_map_release(&work->lockdep_map);
2871 2871
2872 return __flush_work(work); 2872 if (start_flush_work(work, &barr)) {
2873 wait_for_completion(&barr.done);
2874 destroy_work_on_stack(&barr.work);
2875 return true;
2876 } else {
2877 return false;
2878 }
2873} 2879}
2874EXPORT_SYMBOL_GPL(flush_work); 2880EXPORT_SYMBOL_GPL(flush_work);
2875 2881
@@ -4106,7 +4112,7 @@ out_unlock:
4106static int alloc_and_link_pwqs(struct workqueue_struct *wq) 4112static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4107{ 4113{
4108 bool highpri = wq->flags & WQ_HIGHPRI; 4114 bool highpri = wq->flags & WQ_HIGHPRI;
4109 int cpu; 4115 int cpu, ret;
4110 4116
4111 if (!(wq->flags & WQ_UNBOUND)) { 4117 if (!(wq->flags & WQ_UNBOUND)) {
4112 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue); 4118 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
@@ -4126,6 +4132,13 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4126 mutex_unlock(&wq->mutex); 4132 mutex_unlock(&wq->mutex);
4127 } 4133 }
4128 return 0; 4134 return 0;
4135 } else if (wq->flags & __WQ_ORDERED) {
4136 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
4137 /* there should only be single pwq for ordering guarantee */
4138 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
4139 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
4140 "ordering guarantee broken for workqueue %s\n", wq->name);
4141 return ret;
4129 } else { 4142 } else {
4130 return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); 4143 return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
4131 } 4144 }
@@ -4814,14 +4827,7 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
4814 4827
4815 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); 4828 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
4816 schedule_work_on(cpu, &wfc.work); 4829 schedule_work_on(cpu, &wfc.work);
4817 4830 flush_work(&wfc.work);
4818 /*
4819 * The work item is on-stack and can't lead to deadlock through
4820 * flushing. Use __flush_work() to avoid spurious lockdep warnings
4821 * when work_on_cpu()s are nested.
4822 */
4823 __flush_work(&wfc.work);
4824
4825 return wfc.ret; 4831 return wfc.ret;
4826} 4832}
4827EXPORT_SYMBOL_GPL(work_on_cpu); 4833EXPORT_SYMBOL_GPL(work_on_cpu);
@@ -5009,10 +5015,6 @@ static int __init init_workqueues(void)
5009 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; 5015 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
5010 int i, cpu; 5016 int i, cpu;
5011 5017
5012 /* make sure we have enough bits for OFFQ pool ID */
5013 BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
5014 WORK_CPU_END * NR_STD_WORKER_POOLS);
5015
5016 WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); 5018 WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
5017 5019
5018 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); 5020 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
@@ -5051,13 +5053,23 @@ static int __init init_workqueues(void)
5051 } 5053 }
5052 } 5054 }
5053 5055
5054 /* create default unbound wq attrs */ 5056 /* create default unbound and ordered wq attrs */
5055 for (i = 0; i < NR_STD_WORKER_POOLS; i++) { 5057 for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
5056 struct workqueue_attrs *attrs; 5058 struct workqueue_attrs *attrs;
5057 5059
5058 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL))); 5060 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
5059 attrs->nice = std_nice[i]; 5061 attrs->nice = std_nice[i];
5060 unbound_std_wq_attrs[i] = attrs; 5062 unbound_std_wq_attrs[i] = attrs;
5063
5064 /*
5065 * An ordered wq should have only one pwq as ordering is
5066 * guaranteed by max_active which is enforced by pwqs.
5067 * Turn off NUMA so that dfl_pwq is used for all nodes.
5068 */
5069 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
5070 attrs->nice = std_nice[i];
5071 attrs->no_numa = true;
5072 ordered_wq_attrs[i] = attrs;
5061 } 5073 }
5062 5074
5063 system_wq = alloc_workqueue("events", 0, 0); 5075 system_wq = alloc_workqueue("events", 0, 0);
diff --git a/lib/Kconfig b/lib/Kconfig
index 06dc74200a51..991c98bc4a3f 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -322,6 +322,20 @@ config TEXTSEARCH_FSM
322config BTREE 322config BTREE
323 boolean 323 boolean
324 324
325config ASSOCIATIVE_ARRAY
326 bool
327 help
328 Generic associative array. Can be searched and iterated over whilst
329 it is being modified. It is also reasonably quick to search and
330 modify. The algorithms are non-recursive, and the trees are highly
331 capacious.
332
333 See:
334
335 Documentation/assoc_array.txt
336
337 for more information.
338
325config HAS_IOMEM 339config HAS_IOMEM
326 boolean 340 boolean
327 depends on !NO_IOMEM 341 depends on !NO_IOMEM
diff --git a/lib/Makefile b/lib/Makefile
index d480a8c92385..a459c31e8c6b 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -13,7 +13,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
13 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \ 13 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
14 proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \ 14 proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \
15 is_single_threaded.o plist.o decompress.o kobject_uevent.o \ 15 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
16 earlycpio.o percpu-refcount.o percpu_ida.o 16 earlycpio.o
17 17
18obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o 18obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
19lib-$(CONFIG_MMU) += ioremap.o 19lib-$(CONFIG_MMU) += ioremap.o
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
26 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ 26 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
27 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ 27 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
28 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ 28 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
29 percpu_ida.o 29 percpu-refcount.o percpu_ida.o
30obj-y += string_helpers.o 30obj-y += string_helpers.o
31obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o 31obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
32obj-y += kstrtox.o 32obj-y += kstrtox.o
@@ -47,6 +47,7 @@ CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
47obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o 47obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
48 48
49obj-$(CONFIG_BTREE) += btree.o 49obj-$(CONFIG_BTREE) += btree.o
50obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
50obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o 51obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
51obj-$(CONFIG_DEBUG_LIST) += list_debug.o 52obj-$(CONFIG_DEBUG_LIST) += list_debug.o
52obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o 53obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
new file mode 100644
index 000000000000..1b6a44f1ec3e
--- /dev/null
+++ b/lib/assoc_array.c
@@ -0,0 +1,1746 @@
1/* Generic associative array implementation.
2 *
3 * See Documentation/assoc_array.txt for information.
4 *
5 * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
6 * Written by David Howells (dhowells@redhat.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public Licence
10 * as published by the Free Software Foundation; either version
11 * 2 of the Licence, or (at your option) any later version.
12 */
13//#define DEBUG
14#include <linux/slab.h>
15#include <linux/err.h>
16#include <linux/assoc_array_priv.h>
17
18/*
19 * Iterate over an associative array. The caller must hold the RCU read lock
20 * or better.
21 */
22static int assoc_array_subtree_iterate(const struct assoc_array_ptr *root,
23 const struct assoc_array_ptr *stop,
24 int (*iterator)(const void *leaf,
25 void *iterator_data),
26 void *iterator_data)
27{
28 const struct assoc_array_shortcut *shortcut;
29 const struct assoc_array_node *node;
30 const struct assoc_array_ptr *cursor, *ptr, *parent;
31 unsigned long has_meta;
32 int slot, ret;
33
34 cursor = root;
35
36begin_node:
37 if (assoc_array_ptr_is_shortcut(cursor)) {
38 /* Descend through a shortcut */
39 shortcut = assoc_array_ptr_to_shortcut(cursor);
40 smp_read_barrier_depends();
41 cursor = ACCESS_ONCE(shortcut->next_node);
42 }
43
44 node = assoc_array_ptr_to_node(cursor);
45 smp_read_barrier_depends();
46 slot = 0;
47
48 /* We perform two passes of each node.
49 *
50 * The first pass does all the leaves in this node. This means we
51 * don't miss any leaves if the node is split up by insertion whilst
52 * we're iterating over the branches rooted here (we may, however, see
53 * some leaves twice).
54 */
55 has_meta = 0;
56 for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
57 ptr = ACCESS_ONCE(node->slots[slot]);
58 has_meta |= (unsigned long)ptr;
59 if (ptr && assoc_array_ptr_is_leaf(ptr)) {
60 /* We need a barrier between the read of the pointer
61 * and dereferencing the pointer - but only if we are
62 * actually going to dereference it.
63 */
64 smp_read_barrier_depends();
65
66 /* Invoke the callback */
67 ret = iterator(assoc_array_ptr_to_leaf(ptr),
68 iterator_data);
69 if (ret)
70 return ret;
71 }
72 }
73
74 /* The second pass attends to all the metadata pointers. If we follow
75 * one of these we may find that we don't come back here, but rather go
76 * back to a replacement node with the leaves in a different layout.
77 *
78 * We are guaranteed to make progress, however, as the slot number for
79 * a particular portion of the key space cannot change - and we
80 * continue at the back pointer + 1.
81 */
82 if (!(has_meta & ASSOC_ARRAY_PTR_META_TYPE))
83 goto finished_node;
84 slot = 0;
85
86continue_node:
87 node = assoc_array_ptr_to_node(cursor);
88 smp_read_barrier_depends();
89
90 for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
91 ptr = ACCESS_ONCE(node->slots[slot]);
92 if (assoc_array_ptr_is_meta(ptr)) {
93 cursor = ptr;
94 goto begin_node;
95 }
96 }
97
98finished_node:
99 /* Move up to the parent (may need to skip back over a shortcut) */
100 parent = ACCESS_ONCE(node->back_pointer);
101 slot = node->parent_slot;
102 if (parent == stop)
103 return 0;
104
105 if (assoc_array_ptr_is_shortcut(parent)) {
106 shortcut = assoc_array_ptr_to_shortcut(parent);
107 smp_read_barrier_depends();
108 cursor = parent;
109 parent = ACCESS_ONCE(shortcut->back_pointer);
110 slot = shortcut->parent_slot;
111 if (parent == stop)
112 return 0;
113 }
114
115 /* Ascend to next slot in parent node */
116 cursor = parent;
117 slot++;
118 goto continue_node;
119}
120
121/**
122 * assoc_array_iterate - Pass all objects in the array to a callback
123 * @array: The array to iterate over.
124 * @iterator: The callback function.
125 * @iterator_data: Private data for the callback function.
126 *
127 * Iterate over all the objects in an associative array. Each one will be
128 * presented to the iterator function.
129 *
130 * If the array is being modified concurrently with the iteration then it is
131 * possible that some objects in the array will be passed to the iterator
132 * callback more than once - though every object should be passed at least
133 * once. If this is undesirable then the caller must lock against modification
134 * for the duration of this function.
135 *
136 * The function will return 0 if no objects were in the array or else it will
137 * return the result of the last iterator function called. Iteration stops
138 * immediately if any call to the iteration function results in a non-zero
139 * return.
140 *
141 * The caller should hold the RCU read lock or better if concurrent
142 * modification is possible.
143 */
144int assoc_array_iterate(const struct assoc_array *array,
145 int (*iterator)(const void *object,
146 void *iterator_data),
147 void *iterator_data)
148{
149 struct assoc_array_ptr *root = ACCESS_ONCE(array->root);
150
151 if (!root)
152 return 0;
153 return assoc_array_subtree_iterate(root, NULL, iterator, iterator_data);
154}
155
156enum assoc_array_walk_status {
157 assoc_array_walk_tree_empty,
158 assoc_array_walk_found_terminal_node,
159 assoc_array_walk_found_wrong_shortcut,
160} status;
161
162struct assoc_array_walk_result {
163 struct {
164 struct assoc_array_node *node; /* Node in which leaf might be found */
165 int level;
166 int slot;
167 } terminal_node;
168 struct {
169 struct assoc_array_shortcut *shortcut;
170 int level;
171 int sc_level;
172 unsigned long sc_segments;
173 unsigned long dissimilarity;
174 } wrong_shortcut;
175};
176
177/*
178 * Navigate through the internal tree looking for the closest node to the key.
179 */
180static enum assoc_array_walk_status
181assoc_array_walk(const struct assoc_array *array,
182 const struct assoc_array_ops *ops,
183 const void *index_key,
184 struct assoc_array_walk_result *result)
185{
186 struct assoc_array_shortcut *shortcut;
187 struct assoc_array_node *node;
188 struct assoc_array_ptr *cursor, *ptr;
189 unsigned long sc_segments, dissimilarity;
190 unsigned long segments;
191 int level, sc_level, next_sc_level;
192 int slot;
193
194 pr_devel("-->%s()\n", __func__);
195
196 cursor = ACCESS_ONCE(array->root);
197 if (!cursor)
198 return assoc_array_walk_tree_empty;
199
200 level = 0;
201
202 /* Use segments from the key for the new leaf to navigate through the
203 * internal tree, skipping through nodes and shortcuts that are on
204 * route to the destination. Eventually we'll come to a slot that is
205 * either empty or contains a leaf at which point we've found a node in
206 * which the leaf we're looking for might be found or into which it
207 * should be inserted.
208 */
209jumped:
210 segments = ops->get_key_chunk(index_key, level);
211 pr_devel("segments[%d]: %lx\n", level, segments);
212
213 if (assoc_array_ptr_is_shortcut(cursor))
214 goto follow_shortcut;
215
216consider_node:
217 node = assoc_array_ptr_to_node(cursor);
218 smp_read_barrier_depends();
219
220 slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
221 slot &= ASSOC_ARRAY_FAN_MASK;
222 ptr = ACCESS_ONCE(node->slots[slot]);
223
224 pr_devel("consider slot %x [ix=%d type=%lu]\n",
225 slot, level, (unsigned long)ptr & 3);
226
227 if (!assoc_array_ptr_is_meta(ptr)) {
228 /* The node doesn't have a node/shortcut pointer in the slot
229 * corresponding to the index key that we have to follow.
230 */
231 result->terminal_node.node = node;
232 result->terminal_node.level = level;
233 result->terminal_node.slot = slot;
234 pr_devel("<--%s() = terminal_node\n", __func__);
235 return assoc_array_walk_found_terminal_node;
236 }
237
238 if (assoc_array_ptr_is_node(ptr)) {
239 /* There is a pointer to a node in the slot corresponding to
240 * this index key segment, so we need to follow it.
241 */
242 cursor = ptr;
243 level += ASSOC_ARRAY_LEVEL_STEP;
244 if ((level & ASSOC_ARRAY_KEY_CHUNK_MASK) != 0)
245 goto consider_node;
246 goto jumped;
247 }
248
249 /* There is a shortcut in the slot corresponding to the index key
250 * segment. We follow the shortcut if its partial index key matches
251 * this leaf's. Otherwise we need to split the shortcut.
252 */
253 cursor = ptr;
254follow_shortcut:
255 shortcut = assoc_array_ptr_to_shortcut(cursor);
256 smp_read_barrier_depends();
257 pr_devel("shortcut to %d\n", shortcut->skip_to_level);
258 sc_level = level + ASSOC_ARRAY_LEVEL_STEP;
259 BUG_ON(sc_level > shortcut->skip_to_level);
260
261 do {
262 /* Check the leaf against the shortcut's index key a word at a
263 * time, trimming the final word (the shortcut stores the index
264 * key completely from the root to the shortcut's target).
265 */
266 if ((sc_level & ASSOC_ARRAY_KEY_CHUNK_MASK) == 0)
267 segments = ops->get_key_chunk(index_key, sc_level);
268
269 sc_segments = shortcut->index_key[sc_level >> ASSOC_ARRAY_KEY_CHUNK_SHIFT];
270 dissimilarity = segments ^ sc_segments;
271
272 if (round_up(sc_level, ASSOC_ARRAY_KEY_CHUNK_SIZE) > shortcut->skip_to_level) {
273 /* Trim segments that are beyond the shortcut */
274 int shift = shortcut->skip_to_level & ASSOC_ARRAY_KEY_CHUNK_MASK;
275 dissimilarity &= ~(ULONG_MAX << shift);
276 next_sc_level = shortcut->skip_to_level;
277 } else {
278 next_sc_level = sc_level + ASSOC_ARRAY_KEY_CHUNK_SIZE;
279 next_sc_level = round_down(next_sc_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
280 }
281
282 if (dissimilarity != 0) {
283 /* This shortcut points elsewhere */
284 result->wrong_shortcut.shortcut = shortcut;
285 result->wrong_shortcut.level = level;
286 result->wrong_shortcut.sc_level = sc_level;
287 result->wrong_shortcut.sc_segments = sc_segments;
288 result->wrong_shortcut.dissimilarity = dissimilarity;
289 return assoc_array_walk_found_wrong_shortcut;
290 }
291
292 sc_level = next_sc_level;
293 } while (sc_level < shortcut->skip_to_level);
294
295 /* The shortcut matches the leaf's index to this point. */
296 cursor = ACCESS_ONCE(shortcut->next_node);
297 if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) {
298 level = sc_level;
299 goto jumped;
300 } else {
301 level = sc_level;
302 goto consider_node;
303 }
304}
305
306/**
307 * assoc_array_find - Find an object by index key
308 * @array: The associative array to search.
309 * @ops: The operations to use.
310 * @index_key: The key to the object.
311 *
312 * Find an object in an associative array by walking through the internal tree
313 * to the node that should contain the object and then searching the leaves
314 * there. NULL is returned if the requested object was not found in the array.
315 *
316 * The caller must hold the RCU read lock or better.
317 */
318void *assoc_array_find(const struct assoc_array *array,
319 const struct assoc_array_ops *ops,
320 const void *index_key)
321{
322 struct assoc_array_walk_result result;
323 const struct assoc_array_node *node;
324 const struct assoc_array_ptr *ptr;
325 const void *leaf;
326 int slot;
327
328 if (assoc_array_walk(array, ops, index_key, &result) !=
329 assoc_array_walk_found_terminal_node)
330 return NULL;
331
332 node = result.terminal_node.node;
333 smp_read_barrier_depends();
334
335 /* If the target key is available to us, it's has to be pointed to by
336 * the terminal node.
337 */
338 for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
339 ptr = ACCESS_ONCE(node->slots[slot]);
340 if (ptr && assoc_array_ptr_is_leaf(ptr)) {
341 /* We need a barrier between the read of the pointer
342 * and dereferencing the pointer - but only if we are
343 * actually going to dereference it.
344 */
345 leaf = assoc_array_ptr_to_leaf(ptr);
346 smp_read_barrier_depends();
347 if (ops->compare_object(leaf, index_key))
348 return (void *)leaf;
349 }
350 }
351
352 return NULL;
353}
354
355/*
356 * Destructively iterate over an associative array. The caller must prevent
357 * other simultaneous accesses.
358 */
359static void assoc_array_destroy_subtree(struct assoc_array_ptr *root,
360 const struct assoc_array_ops *ops)
361{
362 struct assoc_array_shortcut *shortcut;
363 struct assoc_array_node *node;
364 struct assoc_array_ptr *cursor, *parent = NULL;
365 int slot = -1;
366
367 pr_devel("-->%s()\n", __func__);
368
369 cursor = root;
370 if (!cursor) {
371 pr_devel("empty\n");
372 return;
373 }
374
375move_to_meta:
376 if (assoc_array_ptr_is_shortcut(cursor)) {
377 /* Descend through a shortcut */
378 pr_devel("[%d] shortcut\n", slot);
379 BUG_ON(!assoc_array_ptr_is_shortcut(cursor));
380 shortcut = assoc_array_ptr_to_shortcut(cursor);
381 BUG_ON(shortcut->back_pointer != parent);
382 BUG_ON(slot != -1 && shortcut->parent_slot != slot);
383 parent = cursor;
384 cursor = shortcut->next_node;
385 slot = -1;
386 BUG_ON(!assoc_array_ptr_is_node(cursor));
387 }
388
389 pr_devel("[%d] node\n", slot);
390 node = assoc_array_ptr_to_node(cursor);
391 BUG_ON(node->back_pointer != parent);
392 BUG_ON(slot != -1 && node->parent_slot != slot);
393 slot = 0;
394
395continue_node:
396 pr_devel("Node %p [back=%p]\n", node, node->back_pointer);
397 for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
398 struct assoc_array_ptr *ptr = node->slots[slot];
399 if (!ptr)
400 continue;
401 if (assoc_array_ptr_is_meta(ptr)) {
402 parent = cursor;
403 cursor = ptr;
404 goto move_to_meta;
405 }
406
407 if (ops) {
408 pr_devel("[%d] free leaf\n", slot);
409 ops->free_object(assoc_array_ptr_to_leaf(ptr));
410 }
411 }
412
413 parent = node->back_pointer;
414 slot = node->parent_slot;
415 pr_devel("free node\n");
416 kfree(node);
417 if (!parent)
418 return; /* Done */
419
420 /* Move back up to the parent (may need to free a shortcut on
421 * the way up) */
422 if (assoc_array_ptr_is_shortcut(parent)) {
423 shortcut = assoc_array_ptr_to_shortcut(parent);
424 BUG_ON(shortcut->next_node != cursor);
425 cursor = parent;
426 parent = shortcut->back_pointer;
427 slot = shortcut->parent_slot;
428 pr_devel("free shortcut\n");
429 kfree(shortcut);
430 if (!parent)
431 return;
432
433 BUG_ON(!assoc_array_ptr_is_node(parent));
434 }
435
436 /* Ascend to next slot in parent node */
437 pr_devel("ascend to %p[%d]\n", parent, slot);
438 cursor = parent;
439 node = assoc_array_ptr_to_node(cursor);
440 slot++;
441 goto continue_node;
442}
443
444/**
445 * assoc_array_destroy - Destroy an associative array
446 * @array: The array to destroy.
447 * @ops: The operations to use.
448 *
449 * Discard all metadata and free all objects in an associative array. The
450 * array will be empty and ready to use again upon completion. This function
451 * cannot fail.
452 *
453 * The caller must prevent all other accesses whilst this takes place as no
454 * attempt is made to adjust pointers gracefully to permit RCU readlock-holding
455 * accesses to continue. On the other hand, no memory allocation is required.
456 */
457void assoc_array_destroy(struct assoc_array *array,
458 const struct assoc_array_ops *ops)
459{
460 assoc_array_destroy_subtree(array->root, ops);
461 array->root = NULL;
462}
463
464/*
465 * Handle insertion into an empty tree.
466 */
467static bool assoc_array_insert_in_empty_tree(struct assoc_array_edit *edit)
468{
469 struct assoc_array_node *new_n0;
470
471 pr_devel("-->%s()\n", __func__);
472
473 new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
474 if (!new_n0)
475 return false;
476
477 edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
478 edit->leaf_p = &new_n0->slots[0];
479 edit->adjust_count_on = new_n0;
480 edit->set[0].ptr = &edit->array->root;
481 edit->set[0].to = assoc_array_node_to_ptr(new_n0);
482
483 pr_devel("<--%s() = ok [no root]\n", __func__);
484 return true;
485}
486
487/*
488 * Handle insertion into a terminal node.
489 */
490static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
491 const struct assoc_array_ops *ops,
492 const void *index_key,
493 struct assoc_array_walk_result *result)
494{
495 struct assoc_array_shortcut *shortcut, *new_s0;
496 struct assoc_array_node *node, *new_n0, *new_n1, *side;
497 struct assoc_array_ptr *ptr;
498 unsigned long dissimilarity, base_seg, blank;
499 size_t keylen;
500 bool have_meta;
501 int level, diff;
502 int slot, next_slot, free_slot, i, j;
503
504 node = result->terminal_node.node;
505 level = result->terminal_node.level;
506 edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = result->terminal_node.slot;
507
508 pr_devel("-->%s()\n", __func__);
509
510 /* We arrived at a node which doesn't have an onward node or shortcut
511 * pointer that we have to follow. This means that (a) the leaf we
512 * want must go here (either by insertion or replacement) or (b) we
513 * need to split this node and insert in one of the fragments.
514 */
515 free_slot = -1;
516
517 /* Firstly, we have to check the leaves in this node to see if there's
518 * a matching one we should replace in place.
519 */
520 for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
521 ptr = node->slots[i];
522 if (!ptr) {
523 free_slot = i;
524 continue;
525 }
526 if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
527 pr_devel("replace in slot %d\n", i);
528 edit->leaf_p = &node->slots[i];
529 edit->dead_leaf = node->slots[i];
530 pr_devel("<--%s() = ok [replace]\n", __func__);
531 return true;
532 }
533 }
534
535 /* If there is a free slot in this node then we can just insert the
536 * leaf here.
537 */
538 if (free_slot >= 0) {
539 pr_devel("insert in free slot %d\n", free_slot);
540 edit->leaf_p = &node->slots[free_slot];
541 edit->adjust_count_on = node;
542 pr_devel("<--%s() = ok [insert]\n", __func__);
543 return true;
544 }
545
546 /* The node has no spare slots - so we're either going to have to split
547 * it or insert another node before it.
548 *
549 * Whatever, we're going to need at least two new nodes - so allocate
550 * those now. We may also need a new shortcut, but we deal with that
551 * when we need it.
552 */
553 new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
554 if (!new_n0)
555 return false;
556 edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
557 new_n1 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
558 if (!new_n1)
559 return false;
560 edit->new_meta[1] = assoc_array_node_to_ptr(new_n1);
561
562 /* We need to find out how similar the leaves are. */
563 pr_devel("no spare slots\n");
564 have_meta = false;
565 for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
566 ptr = node->slots[i];
567 if (assoc_array_ptr_is_meta(ptr)) {
568 edit->segment_cache[i] = 0xff;
569 have_meta = true;
570 continue;
571 }
572 base_seg = ops->get_object_key_chunk(
573 assoc_array_ptr_to_leaf(ptr), level);
574 base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
575 edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
576 }
577
578 if (have_meta) {
579 pr_devel("have meta\n");
580 goto split_node;
581 }
582
583 /* The node contains only leaves */
584 dissimilarity = 0;
585 base_seg = edit->segment_cache[0];
586 for (i = 1; i < ASSOC_ARRAY_FAN_OUT; i++)
587 dissimilarity |= edit->segment_cache[i] ^ base_seg;
588
589 pr_devel("only leaves; dissimilarity=%lx\n", dissimilarity);
590
591 if ((dissimilarity & ASSOC_ARRAY_FAN_MASK) == 0) {
592 /* The old leaves all cluster in the same slot. We will need
593 * to insert a shortcut if the new node wants to cluster with them.
594 */
595 if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
596 goto all_leaves_cluster_together;
597
598 /* Otherwise we can just insert a new node ahead of the old
599 * one.
600 */
601 goto present_leaves_cluster_but_not_new_leaf;
602 }
603
604split_node:
605 pr_devel("split node\n");
606
607 /* We need to split the current node; we know that the node doesn't
608 * simply contain a full set of leaves that cluster together (it
609 * contains meta pointers and/or non-clustering leaves).
610 *
611 * We need to expel at least two leaves out of a set consisting of the
612 * leaves in the node and the new leaf.
613 *
614 * We need a new node (n0) to replace the current one and a new node to
615 * take the expelled nodes (n1).
616 */
617 edit->set[0].to = assoc_array_node_to_ptr(new_n0);
618 new_n0->back_pointer = node->back_pointer;
619 new_n0->parent_slot = node->parent_slot;
620 new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
621 new_n1->parent_slot = -1; /* Need to calculate this */
622
623do_split_node:
624 pr_devel("do_split_node\n");
625
626 new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
627 new_n1->nr_leaves_on_branch = 0;
628
629 /* Begin by finding two matching leaves. There have to be at least two
630 * that match - even if there are meta pointers - because any leaf that
631 * would match a slot with a meta pointer in it must be somewhere
632 * behind that meta pointer and cannot be here. Further, given N
633 * remaining leaf slots, we now have N+1 leaves to go in them.
634 */
635 for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
636 slot = edit->segment_cache[i];
637 if (slot != 0xff)
638 for (j = i + 1; j < ASSOC_ARRAY_FAN_OUT + 1; j++)
639 if (edit->segment_cache[j] == slot)
640 goto found_slot_for_multiple_occupancy;
641 }
642found_slot_for_multiple_occupancy:
643 pr_devel("same slot: %x %x [%02x]\n", i, j, slot);
644 BUG_ON(i >= ASSOC_ARRAY_FAN_OUT);
645 BUG_ON(j >= ASSOC_ARRAY_FAN_OUT + 1);
646 BUG_ON(slot >= ASSOC_ARRAY_FAN_OUT);
647
648 new_n1->parent_slot = slot;
649
650 /* Metadata pointers cannot change slot */
651 for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
652 if (assoc_array_ptr_is_meta(node->slots[i]))
653 new_n0->slots[i] = node->slots[i];
654 else
655 new_n0->slots[i] = NULL;
656 BUG_ON(new_n0->slots[slot] != NULL);
657 new_n0->slots[slot] = assoc_array_node_to_ptr(new_n1);
658
659 /* Filter the leaf pointers between the new nodes */
660 free_slot = -1;
661 next_slot = 0;
662 for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
663 if (assoc_array_ptr_is_meta(node->slots[i]))
664 continue;
665 if (edit->segment_cache[i] == slot) {
666 new_n1->slots[next_slot++] = node->slots[i];
667 new_n1->nr_leaves_on_branch++;
668 } else {
669 do {
670 free_slot++;
671 } while (new_n0->slots[free_slot] != NULL);
672 new_n0->slots[free_slot] = node->slots[i];
673 }
674 }
675
676 pr_devel("filtered: f=%x n=%x\n", free_slot, next_slot);
677
678 if (edit->segment_cache[ASSOC_ARRAY_FAN_OUT] != slot) {
679 do {
680 free_slot++;
681 } while (new_n0->slots[free_slot] != NULL);
682 edit->leaf_p = &new_n0->slots[free_slot];
683 edit->adjust_count_on = new_n0;
684 } else {
685 edit->leaf_p = &new_n1->slots[next_slot++];
686 edit->adjust_count_on = new_n1;
687 }
688
689 BUG_ON(next_slot <= 1);
690
691 edit->set_backpointers_to = assoc_array_node_to_ptr(new_n0);
692 for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
693 if (edit->segment_cache[i] == 0xff) {
694 ptr = node->slots[i];
695 BUG_ON(assoc_array_ptr_is_leaf(ptr));
696 if (assoc_array_ptr_is_node(ptr)) {
697 side = assoc_array_ptr_to_node(ptr);
698 edit->set_backpointers[i] = &side->back_pointer;
699 } else {
700 shortcut = assoc_array_ptr_to_shortcut(ptr);
701 edit->set_backpointers[i] = &shortcut->back_pointer;
702 }
703 }
704 }
705
706 ptr = node->back_pointer;
707 if (!ptr)
708 edit->set[0].ptr = &edit->array->root;
709 else if (assoc_array_ptr_is_node(ptr))
710 edit->set[0].ptr = &assoc_array_ptr_to_node(ptr)->slots[node->parent_slot];
711 else
712 edit->set[0].ptr = &assoc_array_ptr_to_shortcut(ptr)->next_node;
713 edit->excised_meta[0] = assoc_array_node_to_ptr(node);
714 pr_devel("<--%s() = ok [split node]\n", __func__);
715 return true;
716
717present_leaves_cluster_but_not_new_leaf:
718 /* All the old leaves cluster in the same slot, but the new leaf wants
719 * to go into a different slot, so we create a new node to hold the new
720 * leaf and a pointer to a new node holding all the old leaves.
721 */
722 pr_devel("present leaves cluster but not new leaf\n");
723
724 new_n0->back_pointer = node->back_pointer;
725 new_n0->parent_slot = node->parent_slot;
726 new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
727 new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
728 new_n1->parent_slot = edit->segment_cache[0];
729 new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
730 edit->adjust_count_on = new_n0;
731
732 for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
733 new_n1->slots[i] = node->slots[i];
734
735 new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
736 edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
737
738 edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
739 edit->set[0].to = assoc_array_node_to_ptr(new_n0);
740 edit->excised_meta[0] = assoc_array_node_to_ptr(node);
741 pr_devel("<--%s() = ok [insert node before]\n", __func__);
742 return true;
743
744all_leaves_cluster_together:
745 /* All the leaves, new and old, want to cluster together in this node
746 * in the same slot, so we have to replace this node with a shortcut to
747 * skip over the identical parts of the key and then place a pair of
748 * nodes, one inside the other, at the end of the shortcut and
749 * distribute the keys between them.
750 *
751 * Firstly we need to work out where the leaves start diverging as a
752 * bit position into their keys so that we know how big the shortcut
753 * needs to be.
754 *
755 * We only need to make a single pass of N of the N+1 leaves because if
756 * any keys differ between themselves at bit X then at least one of
757 * them must also differ with the base key at bit X or before.
758 */
759 pr_devel("all leaves cluster together\n");
760 diff = INT_MAX;
761 for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
762 int x = ops->diff_objects(assoc_array_ptr_to_leaf(node->slots[i]),
763 index_key);
764 if (x < diff) {
765 BUG_ON(x < 0);
766 diff = x;
767 }
768 }
769 BUG_ON(diff == INT_MAX);
770 BUG_ON(diff < level + ASSOC_ARRAY_LEVEL_STEP);
771
772 keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
773 keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
774
775 new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
776 keylen * sizeof(unsigned long), GFP_KERNEL);
777 if (!new_s0)
778 return false;
779 edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s0);
780
781 edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
782 new_s0->back_pointer = node->back_pointer;
783 new_s0->parent_slot = node->parent_slot;
784 new_s0->next_node = assoc_array_node_to_ptr(new_n0);
785 new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
786 new_n0->parent_slot = 0;
787 new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
788 new_n1->parent_slot = -1; /* Need to calculate this */
789
790 new_s0->skip_to_level = level = diff & ~ASSOC_ARRAY_LEVEL_STEP_MASK;
791 pr_devel("skip_to_level = %d [diff %d]\n", level, diff);
792 BUG_ON(level <= 0);
793
794 for (i = 0; i < keylen; i++)
795 new_s0->index_key[i] =
796 ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
797
798 blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
799 pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
800 new_s0->index_key[keylen - 1] &= ~blank;
801
802 /* This now reduces to a node splitting exercise for which we'll need
803 * to regenerate the disparity table.
804 */
805 for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
806 ptr = node->slots[i];
807 base_seg = ops->get_object_key_chunk(assoc_array_ptr_to_leaf(ptr),
808 level);
809 base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
810 edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
811 }
812
813 base_seg = ops->get_key_chunk(index_key, level);
814 base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
815 edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = base_seg & ASSOC_ARRAY_FAN_MASK;
816 goto do_split_node;
817}
818
819/*
820 * Handle insertion into the middle of a shortcut.
821 */
822static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
823 const struct assoc_array_ops *ops,
824 struct assoc_array_walk_result *result)
825{
826 struct assoc_array_shortcut *shortcut, *new_s0, *new_s1;
827 struct assoc_array_node *node, *new_n0, *side;
828 unsigned long sc_segments, dissimilarity, blank;
829 size_t keylen;
830 int level, sc_level, diff;
831 int sc_slot;
832
833 shortcut = result->wrong_shortcut.shortcut;
834 level = result->wrong_shortcut.level;
835 sc_level = result->wrong_shortcut.sc_level;
836 sc_segments = result->wrong_shortcut.sc_segments;
837 dissimilarity = result->wrong_shortcut.dissimilarity;
838
839 pr_devel("-->%s(ix=%d dis=%lx scix=%d)\n",
840 __func__, level, dissimilarity, sc_level);
841
842 /* We need to split a shortcut and insert a node between the two
843 * pieces. Zero-length pieces will be dispensed with entirely.
844 *
845 * First of all, we need to find out in which level the first
846 * difference was.
847 */
848 diff = __ffs(dissimilarity);
849 diff &= ~ASSOC_ARRAY_LEVEL_STEP_MASK;
850 diff += sc_level & ~ASSOC_ARRAY_KEY_CHUNK_MASK;
851 pr_devel("diff=%d\n", diff);
852
853 if (!shortcut->back_pointer) {
854 edit->set[0].ptr = &edit->array->root;
855 } else if (assoc_array_ptr_is_node(shortcut->back_pointer)) {
856 node = assoc_array_ptr_to_node(shortcut->back_pointer);
857 edit->set[0].ptr = &node->slots[shortcut->parent_slot];
858 } else {
859 BUG();
860 }
861
862 edit->excised_meta[0] = assoc_array_shortcut_to_ptr(shortcut);
863
864 /* Create a new node now since we're going to need it anyway */
865 new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
866 if (!new_n0)
867 return false;
868 edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
869 edit->adjust_count_on = new_n0;
870
871 /* Insert a new shortcut before the new node if this segment isn't of
872 * zero length - otherwise we just connect the new node directly to the
873 * parent.
874 */
875 level += ASSOC_ARRAY_LEVEL_STEP;
876 if (diff > level) {
877 pr_devel("pre-shortcut %d...%d\n", level, diff);
878 keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
879 keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
880
881 new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
882 keylen * sizeof(unsigned long), GFP_KERNEL);
883 if (!new_s0)
884 return false;
885 edit->new_meta[1] = assoc_array_shortcut_to_ptr(new_s0);
886 edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
887 new_s0->back_pointer = shortcut->back_pointer;
888 new_s0->parent_slot = shortcut->parent_slot;
889 new_s0->next_node = assoc_array_node_to_ptr(new_n0);
890 new_s0->skip_to_level = diff;
891
892 new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
893 new_n0->parent_slot = 0;
894
895 memcpy(new_s0->index_key, shortcut->index_key,
896 keylen * sizeof(unsigned long));
897
898 blank = ULONG_MAX << (diff & ASSOC_ARRAY_KEY_CHUNK_MASK);
899 pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, diff, blank);
900 new_s0->index_key[keylen - 1] &= ~blank;
901 } else {
902 pr_devel("no pre-shortcut\n");
903 edit->set[0].to = assoc_array_node_to_ptr(new_n0);
904 new_n0->back_pointer = shortcut->back_pointer;
905 new_n0->parent_slot = shortcut->parent_slot;
906 }
907
908 side = assoc_array_ptr_to_node(shortcut->next_node);
909 new_n0->nr_leaves_on_branch = side->nr_leaves_on_branch;
910
911 /* We need to know which slot in the new node is going to take a
912 * metadata pointer.
913 */
914 sc_slot = sc_segments >> (diff & ASSOC_ARRAY_KEY_CHUNK_MASK);
915 sc_slot &= ASSOC_ARRAY_FAN_MASK;
916
917 pr_devel("new slot %lx >> %d -> %d\n",
918 sc_segments, diff & ASSOC_ARRAY_KEY_CHUNK_MASK, sc_slot);
919
920 /* Determine whether we need to follow the new node with a replacement
921 * for the current shortcut. We could in theory reuse the current
922 * shortcut if its parent slot number doesn't change - but that's a
923 * 1-in-16 chance so not worth expending the code upon.
924 */
925 level = diff + ASSOC_ARRAY_LEVEL_STEP;
926 if (level < shortcut->skip_to_level) {
927 pr_devel("post-shortcut %d...%d\n", level, shortcut->skip_to_level);
928 keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
929 keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
930
931 new_s1 = kzalloc(sizeof(struct assoc_array_shortcut) +
932 keylen * sizeof(unsigned long), GFP_KERNEL);
933 if (!new_s1)
934 return false;
935 edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s1);
936
937 new_s1->back_pointer = assoc_array_node_to_ptr(new_n0);
938 new_s1->parent_slot = sc_slot;
939 new_s1->next_node = shortcut->next_node;
940 new_s1->skip_to_level = shortcut->skip_to_level;
941
942 new_n0->slots[sc_slot] = assoc_array_shortcut_to_ptr(new_s1);
943
944 memcpy(new_s1->index_key, shortcut->index_key,
945 keylen * sizeof(unsigned long));
946
947 edit->set[1].ptr = &side->back_pointer;
948 edit->set[1].to = assoc_array_shortcut_to_ptr(new_s1);
949 } else {
950 pr_devel("no post-shortcut\n");
951
952 /* We don't have to replace the pointed-to node as long as we
953 * use memory barriers to make sure the parent slot number is
954 * changed before the back pointer (the parent slot number is
955 * irrelevant to the old parent shortcut).
956 */
957 new_n0->slots[sc_slot] = shortcut->next_node;
958 edit->set_parent_slot[0].p = &side->parent_slot;
959 edit->set_parent_slot[0].to = sc_slot;
960 edit->set[1].ptr = &side->back_pointer;
961 edit->set[1].to = assoc_array_node_to_ptr(new_n0);
962 }
963
964 /* Install the new leaf in a spare slot in the new node. */
965 if (sc_slot == 0)
966 edit->leaf_p = &new_n0->slots[1];
967 else
968 edit->leaf_p = &new_n0->slots[0];
969
970 pr_devel("<--%s() = ok [split shortcut]\n", __func__);
971 return edit;
972}
973
974/**
975 * assoc_array_insert - Script insertion of an object into an associative array
976 * @array: The array to insert into.
977 * @ops: The operations to use.
978 * @index_key: The key to insert at.
979 * @object: The object to insert.
980 *
981 * Precalculate and preallocate a script for the insertion or replacement of an
982 * object in an associative array. This results in an edit script that can
983 * either be applied or cancelled.
984 *
985 * The function returns a pointer to an edit script or -ENOMEM.
986 *
987 * The caller should lock against other modifications and must continue to hold
988 * the lock until assoc_array_apply_edit() has been called.
989 *
990 * Accesses to the tree may take place concurrently with this function,
991 * provided they hold the RCU read lock.
992 */
993struct assoc_array_edit *assoc_array_insert(struct assoc_array *array,
994 const struct assoc_array_ops *ops,
995 const void *index_key,
996 void *object)
997{
998 struct assoc_array_walk_result result;
999 struct assoc_array_edit *edit;
1000
1001 pr_devel("-->%s()\n", __func__);
1002
1003 /* The leaf pointer we're given must not have the bottom bit set as we
1004 * use those for type-marking the pointer. NULL pointers are also not
1005 * allowed as they indicate an empty slot but we have to allow them
1006 * here as they can be updated later.
1007 */
1008 BUG_ON(assoc_array_ptr_is_meta(object));
1009
1010 edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
1011 if (!edit)
1012 return ERR_PTR(-ENOMEM);
1013 edit->array = array;
1014 edit->ops = ops;
1015 edit->leaf = assoc_array_leaf_to_ptr(object);
1016 edit->adjust_count_by = 1;
1017
1018 switch (assoc_array_walk(array, ops, index_key, &result)) {
1019 case assoc_array_walk_tree_empty:
1020 /* Allocate a root node if there isn't one yet */
1021 if (!assoc_array_insert_in_empty_tree(edit))
1022 goto enomem;
1023 return edit;
1024
1025 case assoc_array_walk_found_terminal_node:
1026 /* We found a node that doesn't have a node/shortcut pointer in
1027 * the slot corresponding to the index key that we have to
1028 * follow.
1029 */
1030 if (!assoc_array_insert_into_terminal_node(edit, ops, index_key,
1031 &result))
1032 goto enomem;
1033 return edit;
1034
1035 case assoc_array_walk_found_wrong_shortcut:
1036 /* We found a shortcut that didn't match our key in a slot we
1037 * needed to follow.
1038 */
1039 if (!assoc_array_insert_mid_shortcut(edit, ops, &result))
1040 goto enomem;
1041 return edit;
1042 }
1043
1044enomem:
1045 /* Clean up after an out of memory error */
1046 pr_devel("enomem\n");
1047 assoc_array_cancel_edit(edit);
1048 return ERR_PTR(-ENOMEM);
1049}
1050
1051/**
1052 * assoc_array_insert_set_object - Set the new object pointer in an edit script
1053 * @edit: The edit script to modify.
1054 * @object: The object pointer to set.
1055 *
1056 * Change the object to be inserted in an edit script. The object pointed to
1057 * by the old object is not freed. This must be done prior to applying the
1058 * script.
1059 */
void assoc_array_insert_set_object(struct assoc_array_edit *edit, void *object)
{
	/* NULL can't be stored as a leaf: it would be indistinguishable from
	 * an empty slot (see the comment in assoc_array_insert()).
	 */
	BUG_ON(!object);
	edit->leaf = assoc_array_leaf_to_ptr(object);
}
1065
/* State carried through assoc_array_delete_collapse_iterator(). */
struct assoc_array_delete_collapse_context {
	struct assoc_array_node *node;	/* Replacement node the leaves are packed into */
	const void *skip_leaf;		/* The leaf being deleted; not copied over */
	int slot;			/* Next free slot in node->slots[] */
};
1071
1072/*
1073 * Subtree collapse to node iterator.
1074 */
1075static int assoc_array_delete_collapse_iterator(const void *leaf,
1076 void *iterator_data)
1077{
1078 struct assoc_array_delete_collapse_context *collapse = iterator_data;
1079
1080 if (leaf == collapse->skip_leaf)
1081 return 0;
1082
1083 BUG_ON(collapse->slot >= ASSOC_ARRAY_FAN_OUT);
1084
1085 collapse->node->slots[collapse->slot++] = assoc_array_leaf_to_ptr(leaf);
1086 return 0;
1087}
1088
1089/**
1090 * assoc_array_delete - Script deletion of an object from an associative array
1091 * @array: The array to search.
1092 * @ops: The operations to use.
1093 * @index_key: The key to the object.
1094 *
1095 * Precalculate and preallocate a script for the deletion of an object from an
1096 * associative array. This results in an edit script that can either be
1097 * applied or cancelled.
1098 *
1099 * The function returns a pointer to an edit script if the object was found,
1100 * NULL if the object was not found or -ENOMEM.
1101 *
1102 * The caller should lock against other modifications and must continue to hold
1103 * the lock until assoc_array_apply_edit() has been called.
1104 *
1105 * Accesses to the tree may take place concurrently with this function,
1106 * provided they hold the RCU read lock.
1107 */
struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
					    const struct assoc_array_ops *ops,
					    const void *index_key)
{
	struct assoc_array_delete_collapse_context collapse;
	struct assoc_array_walk_result result;
	struct assoc_array_node *node, *new_n0;
	struct assoc_array_edit *edit;
	struct assoc_array_ptr *ptr;
	bool has_meta;
	int slot, i;

	pr_devel("-->%s()\n", __func__);

	/* Preallocate the edit script; the -1 is the leaf-count delta that
	 * will be propagated up the tree when the edit is applied.
	 */
	edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
	if (!edit)
		return ERR_PTR(-ENOMEM);
	edit->array = array;
	edit->ops = ops;
	edit->adjust_count_by = -1;

	switch (assoc_array_walk(array, ops, index_key, &result)) {
	case assoc_array_walk_found_terminal_node:
		/* We found a node that should contain the leaf we've been
		 * asked to remove - *if* it's in the tree.
		 */
		pr_devel("terminal_node\n");
		node = result.terminal_node.node;

		for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
			ptr = node->slots[slot];
			if (ptr &&
			    assoc_array_ptr_is_leaf(ptr) &&
			    ops->compare_object(assoc_array_ptr_to_leaf(ptr),
						index_key))
				goto found_leaf;
		}
		/* Fall through - the leaf wasn't in the terminal node, so the
		 * object isn't in the tree.
		 */
	case assoc_array_walk_tree_empty:
	case assoc_array_walk_found_wrong_shortcut:
	default:
		assoc_array_cancel_edit(edit);
		pr_devel("not found\n");
		return NULL;
	}

found_leaf:
	BUG_ON(array->nr_leaves_on_tree <= 0);

	/* In the simplest form of deletion we just clear the slot and release
	 * the leaf after a suitable interval.
	 */
	edit->dead_leaf = node->slots[slot];
	edit->set[0].ptr = &node->slots[slot];
	edit->set[0].to = NULL;
	edit->adjust_count_on = node;

	/* If that concludes erasure of the last leaf, then delete the entire
	 * internal array.
	 */
	if (array->nr_leaves_on_tree == 1) {
		edit->set[1].ptr = &array->root;
		edit->set[1].to = NULL;
		edit->adjust_count_on = NULL;
		edit->excised_subtree = array->root;
		pr_devel("all gone\n");
		return edit;
	}

	/* However, we'd also like to clear up some metadata blocks if we
	 * possibly can.
	 *
	 * We go for a simple algorithm of: if this node has FAN_OUT or fewer
	 * leaves in it, then attempt to collapse it - and attempt to
	 * recursively collapse up the tree.
	 *
	 * We could also try and collapse in partially filled subtrees to take
	 * up space in this node.
	 */
	if (node->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT + 1) {
		struct assoc_array_node *parent, *grandparent;
		struct assoc_array_ptr *ptr;

		/* First of all, we need to know if this node has metadata so
		 * that we don't try collapsing if all the leaves are already
		 * here.
		 */
		has_meta = false;
		for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
			ptr = node->slots[i];
			if (assoc_array_ptr_is_meta(ptr)) {
				has_meta = true;
				break;
			}
		}

		pr_devel("leaves: %ld [m=%d]\n",
			 node->nr_leaves_on_branch - 1, has_meta);

		/* Look further up the tree to see if we can collapse this node
		 * into a more proximal node too.
		 */
		parent = node;
	collapse_up:
		pr_devel("collapse subtree: %ld\n", parent->nr_leaves_on_branch);

		ptr = parent->back_pointer;
		if (!ptr)
			goto do_collapse;
		if (assoc_array_ptr_is_shortcut(ptr)) {
			struct assoc_array_shortcut *s = assoc_array_ptr_to_shortcut(ptr);
			ptr = s->back_pointer;
			if (!ptr)
				goto do_collapse;
		}

		grandparent = assoc_array_ptr_to_node(ptr);
		if (grandparent->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT + 1) {
			parent = grandparent;
			goto collapse_up;
		}

	do_collapse:
		/* There's no point collapsing if the original node has no meta
		 * pointers to discard and if we didn't merge into one of that
		 * node's ancestry.
		 */
		if (has_meta || parent != node) {
			node = parent;

			/* Create a new node to collapse into */
			new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
			if (!new_n0)
				goto enomem;
			edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);

			new_n0->back_pointer = node->back_pointer;
			new_n0->parent_slot = node->parent_slot;
			new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
			edit->adjust_count_on = new_n0;

			/* Gather every leaf in the subtree, bar the one being
			 * deleted, into the replacement node.
			 */
			collapse.node = new_n0;
			collapse.skip_leaf = assoc_array_ptr_to_leaf(edit->dead_leaf);
			collapse.slot = 0;
			assoc_array_subtree_iterate(assoc_array_node_to_ptr(node),
						    node->back_pointer,
						    assoc_array_delete_collapse_iterator,
						    &collapse);
			pr_devel("collapsed %d,%lu\n", collapse.slot, new_n0->nr_leaves_on_branch);
			BUG_ON(collapse.slot != new_n0->nr_leaves_on_branch - 1);

			/* Find the pointer that will be redirected to the
			 * replacement node when the edit is applied.
			 */
			if (!node->back_pointer) {
				edit->set[1].ptr = &array->root;
			} else if (assoc_array_ptr_is_leaf(node->back_pointer)) {
				BUG();
			} else if (assoc_array_ptr_is_node(node->back_pointer)) {
				struct assoc_array_node *p =
					assoc_array_ptr_to_node(node->back_pointer);
				edit->set[1].ptr = &p->slots[node->parent_slot];
			} else if (assoc_array_ptr_is_shortcut(node->back_pointer)) {
				struct assoc_array_shortcut *s =
					assoc_array_ptr_to_shortcut(node->back_pointer);
				edit->set[1].ptr = &s->next_node;
			}
			edit->set[1].to = assoc_array_node_to_ptr(new_n0);
			edit->excised_subtree = assoc_array_node_to_ptr(node);
		}
	}

	return edit;

enomem:
	/* Clean up after an out of memory error */
	pr_devel("enomem\n");
	assoc_array_cancel_edit(edit);
	return ERR_PTR(-ENOMEM);
}
1284
1285/**
1286 * assoc_array_clear - Script deletion of all objects from an associative array
1287 * @array: The array to clear.
1288 * @ops: The operations to use.
1289 *
1290 * Precalculate and preallocate a script for the deletion of all the objects
1291 * from an associative array. This results in an edit script that can either
1292 * be applied or cancelled.
1293 *
1294 * The function returns a pointer to an edit script if there are objects to be
1295 * deleted, NULL if there are no objects in the array or -ENOMEM.
1296 *
1297 * The caller should lock against other modifications and must continue to hold
1298 * the lock until assoc_array_apply_edit() has been called.
1299 *
1300 * Accesses to the tree may take place concurrently with this function,
1301 * provided they hold the RCU read lock.
1302 */
struct assoc_array_edit *assoc_array_clear(struct assoc_array *array,
					   const struct assoc_array_ops *ops)
{
	struct assoc_array_edit *edit;

	pr_devel("-->%s()\n", __func__);

	/* Nothing to delete */
	if (!array->root)
		return NULL;

	edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
	if (!edit)
		return ERR_PTR(-ENOMEM);
	edit->array = array;
	edit->ops = ops;
	/* Clearing the root detaches the whole tree in one store; the excised
	 * subtree (and every object in it) is destroyed after an RCU grace
	 * period via assoc_array_rcu_cleanup().
	 * NOTE(review): set[1] rather than set[0] is used for the root clear,
	 * matching assoc_array_delete()'s use of set[1] for the same purpose.
	 */
	edit->set[1].ptr = &array->root;
	edit->set[1].to = NULL;
	edit->excised_subtree = array->root;
	edit->ops_for_excised_subtree = ops;
	pr_devel("all gone\n");
	return edit;
}
1325
1326/*
1327 * Handle the deferred destruction after an applied edit.
1328 */
1329static void assoc_array_rcu_cleanup(struct rcu_head *head)
1330{
1331 struct assoc_array_edit *edit =
1332 container_of(head, struct assoc_array_edit, rcu);
1333 int i;
1334
1335 pr_devel("-->%s()\n", __func__);
1336
1337 if (edit->dead_leaf)
1338 edit->ops->free_object(assoc_array_ptr_to_leaf(edit->dead_leaf));
1339 for (i = 0; i < ARRAY_SIZE(edit->excised_meta); i++)
1340 if (edit->excised_meta[i])
1341 kfree(assoc_array_ptr_to_node(edit->excised_meta[i]));
1342
1343 if (edit->excised_subtree) {
1344 BUG_ON(assoc_array_ptr_is_leaf(edit->excised_subtree));
1345 if (assoc_array_ptr_is_node(edit->excised_subtree)) {
1346 struct assoc_array_node *n =
1347 assoc_array_ptr_to_node(edit->excised_subtree);
1348 n->back_pointer = NULL;
1349 } else {
1350 struct assoc_array_shortcut *s =
1351 assoc_array_ptr_to_shortcut(edit->excised_subtree);
1352 s->back_pointer = NULL;
1353 }
1354 assoc_array_destroy_subtree(edit->excised_subtree,
1355 edit->ops_for_excised_subtree);
1356 }
1357
1358 kfree(edit);
1359}
1360
1361/**
1362 * assoc_array_apply_edit - Apply an edit script to an associative array
1363 * @edit: The script to apply.
1364 *
1365 * Apply an edit script to an associative array to effect an insertion,
1366 * deletion or clearance. As the edit script includes preallocated memory,
1367 * this is guaranteed not to fail.
1368 *
1369 * The edit script, dead objects and dead metadata will be scheduled for
1370 * destruction after an RCU grace period to permit those doing read-only
1371 * accesses on the array to continue to do so under the RCU read lock whilst
1372 * the edit is taking place.
1373 */
void assoc_array_apply_edit(struct assoc_array_edit *edit)
{
	struct assoc_array_shortcut *shortcut;
	struct assoc_array_node *node;
	struct assoc_array_ptr *ptr;
	int i;

	pr_devel("-->%s()\n", __func__);

	/* Each smp_wmb() orders the preceding batch of stores before the next
	 * batch, so concurrent RCU readers never follow a newly-published
	 * pointer to a block whose contents haven't been committed yet.
	 */
	smp_wmb();
	if (edit->leaf_p)
		*edit->leaf_p = edit->leaf;

	smp_wmb();
	for (i = 0; i < ARRAY_SIZE(edit->set_parent_slot); i++)
		if (edit->set_parent_slot[i].p)
			*edit->set_parent_slot[i].p = edit->set_parent_slot[i].to;

	smp_wmb();
	for (i = 0; i < ARRAY_SIZE(edit->set_backpointers); i++)
		if (edit->set_backpointers[i])
			*edit->set_backpointers[i] = edit->set_backpointers_to;

	/* The final set[] stores splice the new blocks into the live tree */
	smp_wmb();
	for (i = 0; i < ARRAY_SIZE(edit->set); i++)
		if (edit->set[i].ptr)
			*edit->set[i].ptr = edit->set[i].to;

	if (edit->array->root == NULL) {
		edit->array->nr_leaves_on_tree = 0;
	} else if (edit->adjust_count_on) {
		/* Propagate the leaf-count delta from the affected node up to
		 * the root, stepping over any intervening shortcut.
		 */
		node = edit->adjust_count_on;
		for (;;) {
			node->nr_leaves_on_branch += edit->adjust_count_by;

			ptr = node->back_pointer;
			if (!ptr)
				break;
			if (assoc_array_ptr_is_shortcut(ptr)) {
				shortcut = assoc_array_ptr_to_shortcut(ptr);
				ptr = shortcut->back_pointer;
				if (!ptr)
					break;
			}
			BUG_ON(!assoc_array_ptr_is_node(ptr));
			node = assoc_array_ptr_to_node(ptr);
		}

		edit->array->nr_leaves_on_tree += edit->adjust_count_by;
	}

	/* Defer freeing of the dead blocks until readers are done with them */
	call_rcu(&edit->rcu, assoc_array_rcu_cleanup);
}
1427
1428/**
1429 * assoc_array_cancel_edit - Discard an edit script.
1430 * @edit: The script to discard.
1431 *
1432 * Free an edit script and all the preallocated data it holds without making
1433 * any changes to the associative array it was intended for.
1434 *
1435 * NOTE! In the case of an insertion script, this does _not_ release the leaf
1436 * that was to be inserted. That is left to the caller.
1437 */
1438void assoc_array_cancel_edit(struct assoc_array_edit *edit)
1439{
1440 struct assoc_array_ptr *ptr;
1441 int i;
1442
1443 pr_devel("-->%s()\n", __func__);
1444
1445 /* Clean up after an out of memory error */
1446 for (i = 0; i < ARRAY_SIZE(edit->new_meta); i++) {
1447 ptr = edit->new_meta[i];
1448 if (ptr) {
1449 if (assoc_array_ptr_is_node(ptr))
1450 kfree(assoc_array_ptr_to_node(ptr));
1451 else
1452 kfree(assoc_array_ptr_to_shortcut(ptr));
1453 }
1454 }
1455 kfree(edit);
1456}
1457
1458/**
1459 * assoc_array_gc - Garbage collect an associative array.
1460 * @array: The array to clean.
1461 * @ops: The operations to use.
1462 * @iterator: A callback function to pass judgement on each object.
1463 * @iterator_data: Private data for the callback function.
1464 *
1465 * Collect garbage from an associative array and pack down the internal tree to
1466 * save memory.
1467 *
1468 * The iterator function is asked to pass judgement upon each object in the
1469 * array. If it returns false, the object is discard and if it returns true,
1470 * the object is kept. If it returns true, it must increment the object's
1471 * usage count (or whatever it needs to do to retain it) before returning.
1472 *
1473 * This function returns 0 if successful or -ENOMEM if out of memory. In the
1474 * latter case, the array is not changed.
1475 *
1476 * The caller should lock against other modifications and must continue to hold
1477 * the lock until assoc_array_apply_edit() has been called.
1478 *
1479 * Accesses to the tree may take place concurrently with this function,
1480 * provided they hold the RCU read lock.
1481 */
int assoc_array_gc(struct assoc_array *array,
		   const struct assoc_array_ops *ops,
		   bool (*iterator)(void *object, void *iterator_data),
		   void *iterator_data)
{
	struct assoc_array_shortcut *shortcut, *new_s;
	struct assoc_array_node *node, *new_n;
	struct assoc_array_edit *edit;
	struct assoc_array_ptr *cursor, *ptr;
	struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
	unsigned long nr_leaves_on_tree;
	int keylen, slot, nr_free, next_slot, i;

	pr_devel("-->%s()\n", __func__);

	if (!array->root)
		return 0;

	/* The GC builds a complete replacement tree and swaps it in with a
	 * single edit; the entire old tree is excised and torn down after an
	 * RCU grace period.
	 */
	edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
	if (!edit)
		return -ENOMEM;
	edit->array = array;
	edit->ops = ops;
	edit->ops_for_excised_subtree = ops;
	edit->set[0].ptr = &array->root;
	edit->excised_subtree = array->root;

	new_root = new_parent = NULL;
	new_ptr_pp = &new_root;
	cursor = array->root;

descend:
	/* If this point is a shortcut, then we need to duplicate it and
	 * advance the target cursor.
	 */
	if (assoc_array_ptr_is_shortcut(cursor)) {
		shortcut = assoc_array_ptr_to_shortcut(cursor);
		keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
		keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
		new_s = kmalloc(sizeof(struct assoc_array_shortcut) +
				keylen * sizeof(unsigned long), GFP_KERNEL);
		if (!new_s)
			goto enomem;
		pr_devel("dup shortcut %p -> %p\n", shortcut, new_s);
		memcpy(new_s, shortcut, (sizeof(struct assoc_array_shortcut) +
					 keylen * sizeof(unsigned long)));
		/* The copied back pointer must be re-aimed into the new tree */
		new_s->back_pointer = new_parent;
		new_s->parent_slot = shortcut->parent_slot;
		*new_ptr_pp = new_parent = assoc_array_shortcut_to_ptr(new_s);
		new_ptr_pp = &new_s->next_node;
		cursor = shortcut->next_node;
	}

	/* Duplicate the node at this position */
	node = assoc_array_ptr_to_node(cursor);
	new_n = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
	if (!new_n)
		goto enomem;
	pr_devel("dup node %p -> %p\n", node, new_n);
	new_n->back_pointer = new_parent;
	new_n->parent_slot = node->parent_slot;
	*new_ptr_pp = new_parent = assoc_array_node_to_ptr(new_n);
	new_ptr_pp = NULL;
	slot = 0;

continue_node:
	/* Filter across any leaves and gc any subtrees */
	for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
		ptr = node->slots[slot];
		if (!ptr)
			continue;

		if (assoc_array_ptr_is_leaf(ptr)) {
			/* Keep the leaf only if the caller's judgement
			 * function says to; a discarded leaf simply isn't
			 * copied into the new node.
			 */
			if (iterator(assoc_array_ptr_to_leaf(ptr),
				     iterator_data))
				/* The iterator will have done any reference
				 * counting on the object for us.
				 */
				new_n->slots[slot] = ptr;
			continue;
		}

		new_ptr_pp = &new_n->slots[slot];
		cursor = ptr;
		goto descend;
	}

	pr_devel("-- compress node %p --\n", new_n);

	/* Count up the number of empty slots in this node and work out the
	 * subtree leaf count.
	 */
	new_n->nr_leaves_on_branch = 0;
	nr_free = 0;
	for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
		ptr = new_n->slots[slot];
		if (!ptr)
			nr_free++;
		else if (assoc_array_ptr_is_leaf(ptr))
			new_n->nr_leaves_on_branch++;
	}
	pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);

	/* See what we can fold in */
	next_slot = 0;
	for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
		struct assoc_array_shortcut *s;
		struct assoc_array_node *child;

		ptr = new_n->slots[slot];
		if (!ptr || assoc_array_ptr_is_leaf(ptr))
			continue;

		s = NULL;
		if (assoc_array_ptr_is_shortcut(ptr)) {
			s = assoc_array_ptr_to_shortcut(ptr);
			ptr = s->next_node;
		}

		child = assoc_array_ptr_to_node(ptr);
		new_n->nr_leaves_on_branch += child->nr_leaves_on_branch;

		if (child->nr_leaves_on_branch <= nr_free + 1) {
			/* Fold the child node into this one */
			pr_devel("[%d] fold node %lu/%d [nx %d]\n",
				 slot, child->nr_leaves_on_branch, nr_free + 1,
				 next_slot);

			/* We would already have reaped an intervening shortcut
			 * on the way back up the tree.
			 */
			BUG_ON(s);

			new_n->slots[slot] = NULL;
			nr_free++;
			if (slot < next_slot)
				next_slot = slot;
			/* Repack the child's leaves into this node's free
			 * slots, then discard the child.
			 */
			for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
				struct assoc_array_ptr *p = child->slots[i];
				if (!p)
					continue;
				BUG_ON(assoc_array_ptr_is_meta(p));
				while (new_n->slots[next_slot])
					next_slot++;
				BUG_ON(next_slot >= ASSOC_ARRAY_FAN_OUT);
				new_n->slots[next_slot++] = p;
				nr_free--;
			}
			kfree(child);
		} else {
			pr_devel("[%d] retain node %lu/%d [nx %d]\n",
				 slot, child->nr_leaves_on_branch, nr_free + 1,
				 next_slot);
		}
	}

	pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);

	/* Once the walk unwinds to the root node, this holds the total */
	nr_leaves_on_tree = new_n->nr_leaves_on_branch;

	/* Excise this node if it is singly occupied by a shortcut */
	if (nr_free == ASSOC_ARRAY_FAN_OUT - 1) {
		for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++)
			if ((ptr = new_n->slots[slot]))
				break;

		if (assoc_array_ptr_is_meta(ptr) &&
		    assoc_array_ptr_is_shortcut(ptr)) {
			pr_devel("excise node %p with 1 shortcut\n", new_n);
			new_s = assoc_array_ptr_to_shortcut(ptr);
			new_parent = new_n->back_pointer;
			slot = new_n->parent_slot;
			kfree(new_n);
			if (!new_parent) {
				/* The lone shortcut becomes the new root */
				new_s->back_pointer = NULL;
				new_s->parent_slot = 0;
				new_root = ptr;
				goto gc_complete;
			}

			if (assoc_array_ptr_is_shortcut(new_parent)) {
				/* We can discard any preceding shortcut also */
				struct assoc_array_shortcut *s =
					assoc_array_ptr_to_shortcut(new_parent);

				pr_devel("excise preceding shortcut\n");

				new_parent = new_s->back_pointer = s->back_pointer;
				slot = new_s->parent_slot = s->parent_slot;
				kfree(s);
				if (!new_parent) {
					new_s->back_pointer = NULL;
					new_s->parent_slot = 0;
					new_root = ptr;
					goto gc_complete;
				}
			}

			new_s->back_pointer = new_parent;
			new_s->parent_slot = slot;
			new_n = assoc_array_ptr_to_node(new_parent);
			new_n->slots[slot] = ptr;
			goto ascend_old_tree;
		}
	}

	/* Excise any shortcuts we might encounter that point to nodes that
	 * only contain leaves.
	 */
	ptr = new_n->back_pointer;
	if (!ptr)
		goto gc_complete;

	if (assoc_array_ptr_is_shortcut(ptr)) {
		new_s = assoc_array_ptr_to_shortcut(ptr);
		new_parent = new_s->back_pointer;
		slot = new_s->parent_slot;

		if (new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
			struct assoc_array_node *n;

			pr_devel("excise shortcut\n");
			new_n->back_pointer = new_parent;
			new_n->parent_slot = slot;
			kfree(new_s);
			if (!new_parent) {
				new_root = assoc_array_node_to_ptr(new_n);
				goto gc_complete;
			}

			n = assoc_array_ptr_to_node(new_parent);
			n->slots[slot] = assoc_array_node_to_ptr(new_n);
		}
	} else {
		new_parent = ptr;
	}
	new_n = assoc_array_ptr_to_node(new_parent);

ascend_old_tree:
	/* Resume the walk in the old tree at the slot after the one we've
	 * just finished processing in the parent node.
	 */
	ptr = node->back_pointer;
	if (assoc_array_ptr_is_shortcut(ptr)) {
		shortcut = assoc_array_ptr_to_shortcut(ptr);
		slot = shortcut->parent_slot;
		cursor = shortcut->back_pointer;
	} else {
		slot = node->parent_slot;
		cursor = ptr;
	}
	/* A NULL back pointer (the root) should have been handled by the
	 * gc_complete paths above.
	 */
	BUG_ON(!ptr);
	node = assoc_array_ptr_to_node(cursor);
	slot++;
	goto continue_node;

gc_complete:
	edit->set[0].to = new_root;
	assoc_array_apply_edit(edit);
	edit->array->nr_leaves_on_tree = nr_leaves_on_tree;
	return 0;

enomem:
	pr_devel("enomem\n");
	/* Tear down whatever fraction of the replacement tree was built */
	assoc_array_destroy_subtree(new_root, edit->ops);
	kfree(edit);
	return -ENOMEM;
}
diff --git a/lib/lockref.c b/lib/lockref.c
index d2b123f8456b..f07a40d33871 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -1,5 +1,6 @@
1#include <linux/export.h> 1#include <linux/export.h>
2#include <linux/lockref.h> 2#include <linux/lockref.h>
3#include <linux/mutex.h>
3 4
4#if USE_CMPXCHG_LOCKREF 5#if USE_CMPXCHG_LOCKREF
5 6
@@ -12,14 +13,6 @@
12#endif 13#endif
13 14
14/* 15/*
15 * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
16 * This is useful for architectures with an expensive cpu_relax().
17 */
18#ifndef arch_mutex_cpu_relax
19# define arch_mutex_cpu_relax() cpu_relax()
20#endif
21
22/*
23 * Note that the "cmpxchg()" reloads the "old" value for the 16 * Note that the "cmpxchg()" reloads the "old" value for the
24 * failure case. 17 * failure case.
25 */ 18 */
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
index 657979f71bef..bf076d281d40 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/mpi/mpiutil.c
@@ -121,3 +121,6 @@ void mpi_free(MPI a)
121 kfree(a); 121 kfree(a);
122} 122}
123EXPORT_SYMBOL_GPL(mpi_free); 123EXPORT_SYMBOL_GPL(mpi_free);
124
125MODULE_DESCRIPTION("Multiprecision maths library");
126MODULE_LICENSE("GPL");
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index b0698ea972c6..9d054bf91d0f 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -117,8 +117,7 @@ static inline void alloc_global_tags(struct percpu_ida *pool,
117 min(pool->nr_free, pool->percpu_batch_size)); 117 min(pool->nr_free, pool->percpu_batch_size));
118} 118}
119 119
120static inline unsigned alloc_local_tag(struct percpu_ida *pool, 120static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
121 struct percpu_ida_cpu *tags)
122{ 121{
123 int tag = -ENOSPC; 122 int tag = -ENOSPC;
124 123
@@ -159,7 +158,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
159 tags = this_cpu_ptr(pool->tag_cpu); 158 tags = this_cpu_ptr(pool->tag_cpu);
160 159
161 /* Fastpath */ 160 /* Fastpath */
162 tag = alloc_local_tag(pool, tags); 161 tag = alloc_local_tag(tags);
163 if (likely(tag >= 0)) { 162 if (likely(tag >= 0)) {
164 local_irq_restore(flags); 163 local_irq_restore(flags);
165 return tag; 164 return tag;
diff --git a/lib/random32.c b/lib/random32.c
index 82da4f4c3489..1e5b2df44291 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -214,18 +214,22 @@ static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0);
214static void __prandom_timer(unsigned long dontcare) 214static void __prandom_timer(unsigned long dontcare)
215{ 215{
216 u32 entropy; 216 u32 entropy;
217 unsigned long expires;
217 218
218 get_random_bytes(&entropy, sizeof(entropy)); 219 get_random_bytes(&entropy, sizeof(entropy));
219 prandom_seed(entropy); 220 prandom_seed(entropy);
221
220 /* reseed every ~60 seconds, in [40 .. 80) interval with slack */ 222 /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
221 seed_timer.expires = jiffies + (40 * HZ + (prandom_u32() % (40 * HZ))); 223 expires = 40 + (prandom_u32() % 40);
224 seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);
225
222 add_timer(&seed_timer); 226 add_timer(&seed_timer);
223} 227}
224 228
225static void prandom_start_seed_timer(void) 229static void __init __prandom_start_seed_timer(void)
226{ 230{
227 set_timer_slack(&seed_timer, HZ); 231 set_timer_slack(&seed_timer, HZ);
228 seed_timer.expires = jiffies + 40 * HZ; 232 seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
229 add_timer(&seed_timer); 233 add_timer(&seed_timer);
230} 234}
231 235
@@ -270,7 +274,7 @@ void prandom_reseed_late(void)
270static int __init prandom_reseed(void) 274static int __init prandom_reseed(void)
271{ 275{
272 __prandom_reseed(false); 276 __prandom_reseed(false);
273 prandom_start_seed_timer(); 277 __prandom_start_seed_timer();
274 return 0; 278 return 0;
275} 279}
276late_initcall(prandom_reseed); 280late_initcall(prandom_reseed);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bccd5a628ea6..33a5dc492810 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1481,8 +1481,18 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
1481 pmd = pmdp_get_and_clear(mm, old_addr, old_pmd); 1481 pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
1482 VM_BUG_ON(!pmd_none(*new_pmd)); 1482 VM_BUG_ON(!pmd_none(*new_pmd));
1483 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); 1483 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
1484 if (new_ptl != old_ptl) 1484 if (new_ptl != old_ptl) {
1485 pgtable_t pgtable;
1486
1487 /*
1488 * Move preallocated PTE page table if new_pmd is on
1489 * different PMD page table.
1490 */
1491 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1492 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
1493
1485 spin_unlock(new_ptl); 1494 spin_unlock(new_ptl);
1495 }
1486 spin_unlock(old_ptl); 1496 spin_unlock(old_ptl);
1487 } 1497 }
1488out: 1498out:
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7d57af21f49e..dee6cf4e6d34 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -476,40 +476,6 @@ static int vma_has_reserves(struct vm_area_struct *vma, long chg)
476 return 0; 476 return 0;
477} 477}
478 478
479static void copy_gigantic_page(struct page *dst, struct page *src)
480{
481 int i;
482 struct hstate *h = page_hstate(src);
483 struct page *dst_base = dst;
484 struct page *src_base = src;
485
486 for (i = 0; i < pages_per_huge_page(h); ) {
487 cond_resched();
488 copy_highpage(dst, src);
489
490 i++;
491 dst = mem_map_next(dst, dst_base, i);
492 src = mem_map_next(src, src_base, i);
493 }
494}
495
496void copy_huge_page(struct page *dst, struct page *src)
497{
498 int i;
499 struct hstate *h = page_hstate(src);
500
501 if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
502 copy_gigantic_page(dst, src);
503 return;
504 }
505
506 might_sleep();
507 for (i = 0; i < pages_per_huge_page(h); i++) {
508 cond_resched();
509 copy_highpage(dst + i, src + i);
510 }
511}
512
513static void enqueue_huge_page(struct hstate *h, struct page *page) 479static void enqueue_huge_page(struct hstate *h, struct page *page)
514{ 480{
515 int nid = page_to_nid(page); 481 int nid = page_to_nid(page);
@@ -736,6 +702,23 @@ int PageHuge(struct page *page)
736} 702}
737EXPORT_SYMBOL_GPL(PageHuge); 703EXPORT_SYMBOL_GPL(PageHuge);
738 704
705/*
706 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
707 * normal or transparent huge pages.
708 */
709int PageHeadHuge(struct page *page_head)
710{
711 compound_page_dtor *dtor;
712
713 if (!PageHead(page_head))
714 return 0;
715
716 dtor = get_compound_page_dtor(page_head);
717
718 return dtor == free_huge_page;
719}
720EXPORT_SYMBOL_GPL(PageHeadHuge);
721
739pgoff_t __basepage_index(struct page *page) 722pgoff_t __basepage_index(struct page *page)
740{ 723{
741 struct page *page_head = compound_head(page); 724 struct page *page_head = compound_head(page);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f1a0ae6e11b8..bf5e89457149 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2694,7 +2694,10 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
2694 goto bypass; 2694 goto bypass;
2695 2695
2696 if (unlikely(task_in_memcg_oom(current))) 2696 if (unlikely(task_in_memcg_oom(current)))
2697 goto bypass; 2697 goto nomem;
2698
2699 if (gfp_mask & __GFP_NOFAIL)
2700 oom = false;
2698 2701
2699 /* 2702 /*
2700 * We always charge the cgroup the mm_struct belongs to. 2703 * We always charge the cgroup the mm_struct belongs to.
@@ -6352,6 +6355,42 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
6352static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 6355static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
6353{ 6356{
6354 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6357 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6358 /*
6359 * XXX: css_offline() would be where we should reparent all
6360 * memory to prepare the cgroup for destruction. However,
6361 * memcg does not do css_tryget() and res_counter charging
6362 * under the same RCU lock region, which means that charging
6363 * could race with offlining. Offlining only happens to
6364 * cgroups with no tasks in them but charges can show up
6365 * without any tasks from the swapin path when the target
6366 * memcg is looked up from the swapout record and not from the
6367 * current task as it usually is. A race like this can leak
6368 * charges and put pages with stale cgroup pointers into
6369 * circulation:
6370 *
6371 * #0 #1
6372 * lookup_swap_cgroup_id()
6373 * rcu_read_lock()
6374 * mem_cgroup_lookup()
6375 * css_tryget()
6376 * rcu_read_unlock()
6377 * disable css_tryget()
6378 * call_rcu()
6379 * offline_css()
6380 * reparent_charges()
6381 * res_counter_charge()
6382 * css_put()
6383 * css_free()
6384 * pc->mem_cgroup = dead memcg
6385 * add page to lru
6386 *
6387 * The bulk of the charges are still moved in offline_css() to
6388 * avoid pinning a lot of pages in case a long-term reference
6389 * like a swapout record is deferring the css_free() to long
6390 * after offlining. But this makes sure we catch any charges
6391 * made after offlining:
6392 */
6393 mem_cgroup_reparent_charges(memcg);
6355 6394
6356 memcg_destroy_kmem(memcg); 6395 memcg_destroy_kmem(memcg);
6357 __mem_cgroup_free(memcg); 6396 __mem_cgroup_free(memcg);
diff --git a/mm/memory.c b/mm/memory.c
index 0409e8f43fa0..5d9025f3b3e1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4272,13 +4272,6 @@ void copy_user_huge_page(struct page *dst, struct page *src,
4272#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ 4272#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
4273 4273
4274#if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS 4274#if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS
4275static struct kmem_cache *page_ptl_cachep;
4276void __init ptlock_cache_init(void)
4277{
4278 page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
4279 SLAB_PANIC, NULL);
4280}
4281
4282bool ptlock_alloc(struct page *page) 4275bool ptlock_alloc(struct page *page)
4283{ 4276{
4284 spinlock_t *ptl; 4277 spinlock_t *ptl;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index c4403cdf3433..eca4a3129129 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2950,7 +2950,7 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2950 return; 2950 return;
2951 } 2951 }
2952 2952
2953 p += snprintf(p, maxlen, policy_modes[mode]); 2953 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2954 2954
2955 if (flags & MPOL_MODE_FLAGS) { 2955 if (flags & MPOL_MODE_FLAGS) {
2956 p += snprintf(p, buffer + maxlen - p, "="); 2956 p += snprintf(p, buffer + maxlen - p, "=");
diff --git a/mm/migrate.c b/mm/migrate.c
index 316e720a2023..bb940045fe85 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -442,6 +442,54 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
442} 442}
443 443
444/* 444/*
445 * Gigantic pages are so large that we do not guarantee that page++ pointer
446 * arithmetic will work across the entire page. We need something more
447 * specialized.
448 */
449static void __copy_gigantic_page(struct page *dst, struct page *src,
450 int nr_pages)
451{
452 int i;
453 struct page *dst_base = dst;
454 struct page *src_base = src;
455
456 for (i = 0; i < nr_pages; ) {
457 cond_resched();
458 copy_highpage(dst, src);
459
460 i++;
461 dst = mem_map_next(dst, dst_base, i);
462 src = mem_map_next(src, src_base, i);
463 }
464}
465
466static void copy_huge_page(struct page *dst, struct page *src)
467{
468 int i;
469 int nr_pages;
470
471 if (PageHuge(src)) {
472 /* hugetlbfs page */
473 struct hstate *h = page_hstate(src);
474 nr_pages = pages_per_huge_page(h);
475
476 if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
477 __copy_gigantic_page(dst, src, nr_pages);
478 return;
479 }
480 } else {
481 /* thp page */
482 BUG_ON(!PageTransHuge(src));
483 nr_pages = hpage_nr_pages(src);
484 }
485
486 for (i = 0; i < nr_pages; i++) {
487 cond_resched();
488 copy_highpage(dst + i, src + i);
489 }
490}
491
492/*
445 * Copy the page to its new location 493 * Copy the page to its new location
446 */ 494 */
447void migrate_page_copy(struct page *newpage, struct page *page) 495void migrate_page_copy(struct page *newpage, struct page *page)
diff --git a/mm/shmem.c b/mm/shmem.c
index 8297623fcaed..902a14842b74 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2918,13 +2918,8 @@ static struct dentry_operations anon_ops = {
2918 .d_dname = simple_dname 2918 .d_dname = simple_dname
2919}; 2919};
2920 2920
2921/** 2921static struct file *__shmem_file_setup(const char *name, loff_t size,
2922 * shmem_file_setup - get an unlinked file living in tmpfs 2922 unsigned long flags, unsigned int i_flags)
2923 * @name: name for dentry (to be seen in /proc/<pid>/maps
2924 * @size: size to be set for the file
2925 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2926 */
2927struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
2928{ 2923{
2929 struct file *res; 2924 struct file *res;
2930 struct inode *inode; 2925 struct inode *inode;
@@ -2957,6 +2952,7 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
2957 if (!inode) 2952 if (!inode)
2958 goto put_dentry; 2953 goto put_dentry;
2959 2954
2955 inode->i_flags |= i_flags;
2960 d_instantiate(path.dentry, inode); 2956 d_instantiate(path.dentry, inode);
2961 inode->i_size = size; 2957 inode->i_size = size;
2962 clear_nlink(inode); /* It is unlinked */ 2958 clear_nlink(inode); /* It is unlinked */
@@ -2977,6 +2973,32 @@ put_memory:
2977 shmem_unacct_size(flags, size); 2973 shmem_unacct_size(flags, size);
2978 return res; 2974 return res;
2979} 2975}
2976
2977/**
2978 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
2979 * kernel internal. There will be NO LSM permission checks against the
2980 * underlying inode. So users of this interface must do LSM checks at a
2981 * higher layer. The one user is the big_key implementation. LSM checks
2982 * are provided at the key level rather than the inode level.
2983 * @name: name for dentry (to be seen in /proc/<pid>/maps
2984 * @size: size to be set for the file
2985 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2986 */
2987struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
2988{
2989 return __shmem_file_setup(name, size, flags, S_PRIVATE);
2990}
2991
2992/**
2993 * shmem_file_setup - get an unlinked file living in tmpfs
2994 * @name: name for dentry (to be seen in /proc/<pid>/maps
2995 * @size: size to be set for the file
2996 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2997 */
2998struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
2999{
3000 return __shmem_file_setup(name, size, flags, 0);
3001}
2980EXPORT_SYMBOL_GPL(shmem_file_setup); 3002EXPORT_SYMBOL_GPL(shmem_file_setup);
2981 3003
2982/** 3004/**
diff --git a/mm/slab.c b/mm/slab.c
index 0c8967bb2018..eb043bf05f4c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -164,72 +164,6 @@
164static bool pfmemalloc_active __read_mostly; 164static bool pfmemalloc_active __read_mostly;
165 165
166/* 166/*
167 * kmem_bufctl_t:
168 *
169 * Bufctl's are used for linking objs within a slab
170 * linked offsets.
171 *
172 * This implementation relies on "struct page" for locating the cache &
173 * slab an object belongs to.
174 * This allows the bufctl structure to be small (one int), but limits
175 * the number of objects a slab (not a cache) can contain when off-slab
176 * bufctls are used. The limit is the size of the largest general cache
177 * that does not use off-slab slabs.
178 * For 32bit archs with 4 kB pages, is this 56.
179 * This is not serious, as it is only for large objects, when it is unwise
180 * to have too many per slab.
181 * Note: This limit can be raised by introducing a general cache whose size
182 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
183 */
184
185typedef unsigned int kmem_bufctl_t;
186#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0)
187#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1)
188#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2)
189#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
190
191/*
192 * struct slab_rcu
193 *
194 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
195 * arrange for kmem_freepages to be called via RCU. This is useful if
196 * we need to approach a kernel structure obliquely, from its address
197 * obtained without the usual locking. We can lock the structure to
198 * stabilize it and check it's still at the given address, only if we
199 * can be sure that the memory has not been meanwhile reused for some
200 * other kind of object (which our subsystem's lock might corrupt).
201 *
202 * rcu_read_lock before reading the address, then rcu_read_unlock after
203 * taking the spinlock within the structure expected at that address.
204 */
205struct slab_rcu {
206 struct rcu_head head;
207 struct kmem_cache *cachep;
208 void *addr;
209};
210
211/*
212 * struct slab
213 *
214 * Manages the objs in a slab. Placed either at the beginning of mem allocated
215 * for a slab, or allocated from an general cache.
216 * Slabs are chained into three list: fully used, partial, fully free slabs.
217 */
218struct slab {
219 union {
220 struct {
221 struct list_head list;
222 unsigned long colouroff;
223 void *s_mem; /* including colour offset */
224 unsigned int inuse; /* num of objs active in slab */
225 kmem_bufctl_t free;
226 unsigned short nodeid;
227 };
228 struct slab_rcu __slab_cover_slab_rcu;
229 };
230};
231
232/*
233 * struct array_cache 167 * struct array_cache
234 * 168 *
235 * Purpose: 169 * Purpose:
@@ -456,18 +390,10 @@ static inline struct kmem_cache *virt_to_cache(const void *obj)
456 return page->slab_cache; 390 return page->slab_cache;
457} 391}
458 392
459static inline struct slab *virt_to_slab(const void *obj) 393static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
460{
461 struct page *page = virt_to_head_page(obj);
462
463 VM_BUG_ON(!PageSlab(page));
464 return page->slab_page;
465}
466
467static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
468 unsigned int idx) 394 unsigned int idx)
469{ 395{
470 return slab->s_mem + cache->size * idx; 396 return page->s_mem + cache->size * idx;
471} 397}
472 398
473/* 399/*
@@ -477,9 +403,9 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
477 * reciprocal_divide(offset, cache->reciprocal_buffer_size) 403 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
478 */ 404 */
479static inline unsigned int obj_to_index(const struct kmem_cache *cache, 405static inline unsigned int obj_to_index(const struct kmem_cache *cache,
480 const struct slab *slab, void *obj) 406 const struct page *page, void *obj)
481{ 407{
482 u32 offset = (obj - slab->s_mem); 408 u32 offset = (obj - page->s_mem);
483 return reciprocal_divide(offset, cache->reciprocal_buffer_size); 409 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
484} 410}
485 411
@@ -641,7 +567,7 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
641 567
642static size_t slab_mgmt_size(size_t nr_objs, size_t align) 568static size_t slab_mgmt_size(size_t nr_objs, size_t align)
643{ 569{
644 return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align); 570 return ALIGN(nr_objs * sizeof(unsigned int), align);
645} 571}
646 572
647/* 573/*
@@ -660,8 +586,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
660 * on it. For the latter case, the memory allocated for a 586 * on it. For the latter case, the memory allocated for a
661 * slab is used for: 587 * slab is used for:
662 * 588 *
663 * - The struct slab 589 * - One unsigned int for each object
664 * - One kmem_bufctl_t for each object
665 * - Padding to respect alignment of @align 590 * - Padding to respect alignment of @align
666 * - @buffer_size bytes for each object 591 * - @buffer_size bytes for each object
667 * 592 *
@@ -674,8 +599,6 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
674 mgmt_size = 0; 599 mgmt_size = 0;
675 nr_objs = slab_size / buffer_size; 600 nr_objs = slab_size / buffer_size;
676 601
677 if (nr_objs > SLAB_LIMIT)
678 nr_objs = SLAB_LIMIT;
679 } else { 602 } else {
680 /* 603 /*
681 * Ignore padding for the initial guess. The padding 604 * Ignore padding for the initial guess. The padding
@@ -685,8 +608,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
685 * into the memory allocation when taking the padding 608 * into the memory allocation when taking the padding
686 * into account. 609 * into account.
687 */ 610 */
688 nr_objs = (slab_size - sizeof(struct slab)) / 611 nr_objs = (slab_size) / (buffer_size + sizeof(unsigned int));
689 (buffer_size + sizeof(kmem_bufctl_t));
690 612
691 /* 613 /*
692 * This calculated number will be either the right 614 * This calculated number will be either the right
@@ -696,9 +618,6 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
696 > slab_size) 618 > slab_size)
697 nr_objs--; 619 nr_objs--;
698 620
699 if (nr_objs > SLAB_LIMIT)
700 nr_objs = SLAB_LIMIT;
701
702 mgmt_size = slab_mgmt_size(nr_objs, align); 621 mgmt_size = slab_mgmt_size(nr_objs, align);
703 } 622 }
704 *num = nr_objs; 623 *num = nr_objs;
@@ -829,10 +748,8 @@ static struct array_cache *alloc_arraycache(int node, int entries,
829 return nc; 748 return nc;
830} 749}
831 750
832static inline bool is_slab_pfmemalloc(struct slab *slabp) 751static inline bool is_slab_pfmemalloc(struct page *page)
833{ 752{
834 struct page *page = virt_to_page(slabp->s_mem);
835
836 return PageSlabPfmemalloc(page); 753 return PageSlabPfmemalloc(page);
837} 754}
838 755
@@ -841,23 +758,23 @@ static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
841 struct array_cache *ac) 758 struct array_cache *ac)
842{ 759{
843 struct kmem_cache_node *n = cachep->node[numa_mem_id()]; 760 struct kmem_cache_node *n = cachep->node[numa_mem_id()];
844 struct slab *slabp; 761 struct page *page;
845 unsigned long flags; 762 unsigned long flags;
846 763
847 if (!pfmemalloc_active) 764 if (!pfmemalloc_active)
848 return; 765 return;
849 766
850 spin_lock_irqsave(&n->list_lock, flags); 767 spin_lock_irqsave(&n->list_lock, flags);
851 list_for_each_entry(slabp, &n->slabs_full, list) 768 list_for_each_entry(page, &n->slabs_full, lru)
852 if (is_slab_pfmemalloc(slabp)) 769 if (is_slab_pfmemalloc(page))
853 goto out; 770 goto out;
854 771
855 list_for_each_entry(slabp, &n->slabs_partial, list) 772 list_for_each_entry(page, &n->slabs_partial, lru)
856 if (is_slab_pfmemalloc(slabp)) 773 if (is_slab_pfmemalloc(page))
857 goto out; 774 goto out;
858 775
859 list_for_each_entry(slabp, &n->slabs_free, list) 776 list_for_each_entry(page, &n->slabs_free, lru)
860 if (is_slab_pfmemalloc(slabp)) 777 if (is_slab_pfmemalloc(page))
861 goto out; 778 goto out;
862 779
863 pfmemalloc_active = false; 780 pfmemalloc_active = false;
@@ -897,8 +814,8 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
897 */ 814 */
898 n = cachep->node[numa_mem_id()]; 815 n = cachep->node[numa_mem_id()];
899 if (!list_empty(&n->slabs_free) && force_refill) { 816 if (!list_empty(&n->slabs_free) && force_refill) {
900 struct slab *slabp = virt_to_slab(objp); 817 struct page *page = virt_to_head_page(objp);
901 ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem)); 818 ClearPageSlabPfmemalloc(page);
902 clear_obj_pfmemalloc(&objp); 819 clear_obj_pfmemalloc(&objp);
903 recheck_pfmemalloc_active(cachep, ac); 820 recheck_pfmemalloc_active(cachep, ac);
904 return objp; 821 return objp;
@@ -1099,8 +1016,7 @@ static void drain_alien_cache(struct kmem_cache *cachep,
1099 1016
1100static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) 1017static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1101{ 1018{
1102 struct slab *slabp = virt_to_slab(objp); 1019 int nodeid = page_to_nid(virt_to_page(objp));
1103 int nodeid = slabp->nodeid;
1104 struct kmem_cache_node *n; 1020 struct kmem_cache_node *n;
1105 struct array_cache *alien = NULL; 1021 struct array_cache *alien = NULL;
1106 int node; 1022 int node;
@@ -1111,7 +1027,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1111 * Make sure we are not freeing a object from another node to the array 1027 * Make sure we are not freeing a object from another node to the array
1112 * cache on this cpu. 1028 * cache on this cpu.
1113 */ 1029 */
1114 if (likely(slabp->nodeid == node)) 1030 if (likely(nodeid == node))
1115 return 0; 1031 return 0;
1116 1032
1117 n = cachep->node[node]; 1033 n = cachep->node[node];
@@ -1512,6 +1428,8 @@ void __init kmem_cache_init(void)
1512{ 1428{
1513 int i; 1429 int i;
1514 1430
1431 BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
1432 sizeof(struct rcu_head));
1515 kmem_cache = &kmem_cache_boot; 1433 kmem_cache = &kmem_cache_boot;
1516 setup_node_pointer(kmem_cache); 1434 setup_node_pointer(kmem_cache);
1517 1435
@@ -1687,7 +1605,7 @@ static noinline void
1687slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) 1605slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1688{ 1606{
1689 struct kmem_cache_node *n; 1607 struct kmem_cache_node *n;
1690 struct slab *slabp; 1608 struct page *page;
1691 unsigned long flags; 1609 unsigned long flags;
1692 int node; 1610 int node;
1693 1611
@@ -1706,15 +1624,15 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1706 continue; 1624 continue;
1707 1625
1708 spin_lock_irqsave(&n->list_lock, flags); 1626 spin_lock_irqsave(&n->list_lock, flags);
1709 list_for_each_entry(slabp, &n->slabs_full, list) { 1627 list_for_each_entry(page, &n->slabs_full, lru) {
1710 active_objs += cachep->num; 1628 active_objs += cachep->num;
1711 active_slabs++; 1629 active_slabs++;
1712 } 1630 }
1713 list_for_each_entry(slabp, &n->slabs_partial, list) { 1631 list_for_each_entry(page, &n->slabs_partial, lru) {
1714 active_objs += slabp->inuse; 1632 active_objs += page->active;
1715 active_slabs++; 1633 active_slabs++;
1716 } 1634 }
1717 list_for_each_entry(slabp, &n->slabs_free, list) 1635 list_for_each_entry(page, &n->slabs_free, lru)
1718 num_slabs++; 1636 num_slabs++;
1719 1637
1720 free_objects += n->free_objects; 1638 free_objects += n->free_objects;
@@ -1736,19 +1654,11 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1736 * did not request dmaable memory, we might get it, but that 1654 * did not request dmaable memory, we might get it, but that
1737 * would be relatively rare and ignorable. 1655 * would be relatively rare and ignorable.
1738 */ 1656 */
1739static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) 1657static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1658 int nodeid)
1740{ 1659{
1741 struct page *page; 1660 struct page *page;
1742 int nr_pages; 1661 int nr_pages;
1743 int i;
1744
1745#ifndef CONFIG_MMU
1746 /*
1747 * Nommu uses slab's for process anonymous memory allocations, and thus
1748 * requires __GFP_COMP to properly refcount higher order allocations
1749 */
1750 flags |= __GFP_COMP;
1751#endif
1752 1662
1753 flags |= cachep->allocflags; 1663 flags |= cachep->allocflags;
1754 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1664 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
@@ -1772,12 +1682,9 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1772 else 1682 else
1773 add_zone_page_state(page_zone(page), 1683 add_zone_page_state(page_zone(page),
1774 NR_SLAB_UNRECLAIMABLE, nr_pages); 1684 NR_SLAB_UNRECLAIMABLE, nr_pages);
1775 for (i = 0; i < nr_pages; i++) { 1685 __SetPageSlab(page);
1776 __SetPageSlab(page + i); 1686 if (page->pfmemalloc)
1777 1687 SetPageSlabPfmemalloc(page);
1778 if (page->pfmemalloc)
1779 SetPageSlabPfmemalloc(page + i);
1780 }
1781 memcg_bind_pages(cachep, cachep->gfporder); 1688 memcg_bind_pages(cachep, cachep->gfporder);
1782 1689
1783 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { 1690 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
@@ -1789,17 +1696,15 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1789 kmemcheck_mark_unallocated_pages(page, nr_pages); 1696 kmemcheck_mark_unallocated_pages(page, nr_pages);
1790 } 1697 }
1791 1698
1792 return page_address(page); 1699 return page;
1793} 1700}
1794 1701
1795/* 1702/*
1796 * Interface to system's page release. 1703 * Interface to system's page release.
1797 */ 1704 */
1798static void kmem_freepages(struct kmem_cache *cachep, void *addr) 1705static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1799{ 1706{
1800 unsigned long i = (1 << cachep->gfporder); 1707 const unsigned long nr_freed = (1 << cachep->gfporder);
1801 struct page *page = virt_to_page(addr);
1802 const unsigned long nr_freed = i;
1803 1708
1804 kmemcheck_free_shadow(page, cachep->gfporder); 1709 kmemcheck_free_shadow(page, cachep->gfporder);
1805 1710
@@ -1809,27 +1714,28 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
1809 else 1714 else
1810 sub_zone_page_state(page_zone(page), 1715 sub_zone_page_state(page_zone(page),
1811 NR_SLAB_UNRECLAIMABLE, nr_freed); 1716 NR_SLAB_UNRECLAIMABLE, nr_freed);
1812 while (i--) { 1717
1813 BUG_ON(!PageSlab(page)); 1718 BUG_ON(!PageSlab(page));
1814 __ClearPageSlabPfmemalloc(page); 1719 __ClearPageSlabPfmemalloc(page);
1815 __ClearPageSlab(page); 1720 __ClearPageSlab(page);
1816 page++; 1721 page_mapcount_reset(page);
1817 } 1722 page->mapping = NULL;
1818 1723
1819 memcg_release_pages(cachep, cachep->gfporder); 1724 memcg_release_pages(cachep, cachep->gfporder);
1820 if (current->reclaim_state) 1725 if (current->reclaim_state)
1821 current->reclaim_state->reclaimed_slab += nr_freed; 1726 current->reclaim_state->reclaimed_slab += nr_freed;
1822 free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder); 1727 __free_memcg_kmem_pages(page, cachep->gfporder);
1823} 1728}
1824 1729
1825static void kmem_rcu_free(struct rcu_head *head) 1730static void kmem_rcu_free(struct rcu_head *head)
1826{ 1731{
1827 struct slab_rcu *slab_rcu = (struct slab_rcu *)head; 1732 struct kmem_cache *cachep;
1828 struct kmem_cache *cachep = slab_rcu->cachep; 1733 struct page *page;
1829 1734
1830 kmem_freepages(cachep, slab_rcu->addr); 1735 page = container_of(head, struct page, rcu_head);
1831 if (OFF_SLAB(cachep)) 1736 cachep = page->slab_cache;
1832 kmem_cache_free(cachep->slabp_cache, slab_rcu); 1737
1738 kmem_freepages(cachep, page);
1833} 1739}
1834 1740
1835#if DEBUG 1741#if DEBUG
@@ -1978,19 +1884,19 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1978 /* Print some data about the neighboring objects, if they 1884 /* Print some data about the neighboring objects, if they
1979 * exist: 1885 * exist:
1980 */ 1886 */
1981 struct slab *slabp = virt_to_slab(objp); 1887 struct page *page = virt_to_head_page(objp);
1982 unsigned int objnr; 1888 unsigned int objnr;
1983 1889
1984 objnr = obj_to_index(cachep, slabp, objp); 1890 objnr = obj_to_index(cachep, page, objp);
1985 if (objnr) { 1891 if (objnr) {
1986 objp = index_to_obj(cachep, slabp, objnr - 1); 1892 objp = index_to_obj(cachep, page, objnr - 1);
1987 realobj = (char *)objp + obj_offset(cachep); 1893 realobj = (char *)objp + obj_offset(cachep);
1988 printk(KERN_ERR "Prev obj: start=%p, len=%d\n", 1894 printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
1989 realobj, size); 1895 realobj, size);
1990 print_objinfo(cachep, objp, 2); 1896 print_objinfo(cachep, objp, 2);
1991 } 1897 }
1992 if (objnr + 1 < cachep->num) { 1898 if (objnr + 1 < cachep->num) {
1993 objp = index_to_obj(cachep, slabp, objnr + 1); 1899 objp = index_to_obj(cachep, page, objnr + 1);
1994 realobj = (char *)objp + obj_offset(cachep); 1900 realobj = (char *)objp + obj_offset(cachep);
1995 printk(KERN_ERR "Next obj: start=%p, len=%d\n", 1901 printk(KERN_ERR "Next obj: start=%p, len=%d\n",
1996 realobj, size); 1902 realobj, size);
@@ -2001,11 +1907,12 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
2001#endif 1907#endif
2002 1908
2003#if DEBUG 1909#if DEBUG
2004static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp) 1910static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1911 struct page *page)
2005{ 1912{
2006 int i; 1913 int i;
2007 for (i = 0; i < cachep->num; i++) { 1914 for (i = 0; i < cachep->num; i++) {
2008 void *objp = index_to_obj(cachep, slabp, i); 1915 void *objp = index_to_obj(cachep, page, i);
2009 1916
2010 if (cachep->flags & SLAB_POISON) { 1917 if (cachep->flags & SLAB_POISON) {
2011#ifdef CONFIG_DEBUG_PAGEALLOC 1918#ifdef CONFIG_DEBUG_PAGEALLOC
@@ -2030,7 +1937,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
2030 } 1937 }
2031} 1938}
2032#else 1939#else
2033static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp) 1940static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1941 struct page *page)
2034{ 1942{
2035} 1943}
2036#endif 1944#endif
@@ -2044,23 +1952,34 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
2044 * Before calling the slab must have been unlinked from the cache. The 1952 * Before calling the slab must have been unlinked from the cache. The
2045 * cache-lock is not held/needed. 1953 * cache-lock is not held/needed.
2046 */ 1954 */
2047static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) 1955static void slab_destroy(struct kmem_cache *cachep, struct page *page)
2048{ 1956{
2049 void *addr = slabp->s_mem - slabp->colouroff; 1957 void *freelist;
2050 1958
2051 slab_destroy_debugcheck(cachep, slabp); 1959 freelist = page->freelist;
1960 slab_destroy_debugcheck(cachep, page);
2052 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { 1961 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
2053 struct slab_rcu *slab_rcu; 1962 struct rcu_head *head;
1963
1964 /*
1965 * RCU free overloads the RCU head over the LRU.
 1966 * slab_page has been overloaded over the LRU,
1967 * however it is not used from now on so that
1968 * we can use it safely.
1969 */
1970 head = (void *)&page->rcu_head;
1971 call_rcu(head, kmem_rcu_free);
2054 1972
2055 slab_rcu = (struct slab_rcu *)slabp;
2056 slab_rcu->cachep = cachep;
2057 slab_rcu->addr = addr;
2058 call_rcu(&slab_rcu->head, kmem_rcu_free);
2059 } else { 1973 } else {
2060 kmem_freepages(cachep, addr); 1974 kmem_freepages(cachep, page);
2061 if (OFF_SLAB(cachep))
2062 kmem_cache_free(cachep->slabp_cache, slabp);
2063 } 1975 }
1976
1977 /*
1978 * From now on, we don't use freelist
1979 * although actual page can be freed in rcu context
1980 */
1981 if (OFF_SLAB(cachep))
1982 kmem_cache_free(cachep->freelist_cache, freelist);
2064} 1983}
2065 1984
2066/** 1985/**
@@ -2097,8 +2016,8 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
2097 * use off-slab slabs. Needed to avoid a possible 2016 * use off-slab slabs. Needed to avoid a possible
2098 * looping condition in cache_grow(). 2017 * looping condition in cache_grow().
2099 */ 2018 */
2100 offslab_limit = size - sizeof(struct slab); 2019 offslab_limit = size;
2101 offslab_limit /= sizeof(kmem_bufctl_t); 2020 offslab_limit /= sizeof(unsigned int);
2102 2021
2103 if (num > offslab_limit) 2022 if (num > offslab_limit)
2104 break; 2023 break;
@@ -2220,7 +2139,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2220int 2139int
2221__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) 2140__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2222{ 2141{
2223 size_t left_over, slab_size, ralign; 2142 size_t left_over, freelist_size, ralign;
2224 gfp_t gfp; 2143 gfp_t gfp;
2225 int err; 2144 int err;
2226 size_t size = cachep->size; 2145 size_t size = cachep->size;
@@ -2339,22 +2258,21 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2339 if (!cachep->num) 2258 if (!cachep->num)
2340 return -E2BIG; 2259 return -E2BIG;
2341 2260
2342 slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) 2261 freelist_size =
2343 + sizeof(struct slab), cachep->align); 2262 ALIGN(cachep->num * sizeof(unsigned int), cachep->align);
2344 2263
2345 /* 2264 /*
2346 * If the slab has been placed off-slab, and we have enough space then 2265 * If the slab has been placed off-slab, and we have enough space then
2347 * move it on-slab. This is at the expense of any extra colouring. 2266 * move it on-slab. This is at the expense of any extra colouring.
2348 */ 2267 */
2349 if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) { 2268 if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) {
2350 flags &= ~CFLGS_OFF_SLAB; 2269 flags &= ~CFLGS_OFF_SLAB;
2351 left_over -= slab_size; 2270 left_over -= freelist_size;
2352 } 2271 }
2353 2272
2354 if (flags & CFLGS_OFF_SLAB) { 2273 if (flags & CFLGS_OFF_SLAB) {
2355 /* really off slab. No need for manual alignment */ 2274 /* really off slab. No need for manual alignment */
2356 slab_size = 2275 freelist_size = cachep->num * sizeof(unsigned int);
2357 cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
2358 2276
2359#ifdef CONFIG_PAGE_POISONING 2277#ifdef CONFIG_PAGE_POISONING
2360 /* If we're going to use the generic kernel_map_pages() 2278 /* If we're going to use the generic kernel_map_pages()
@@ -2371,16 +2289,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2371 if (cachep->colour_off < cachep->align) 2289 if (cachep->colour_off < cachep->align)
2372 cachep->colour_off = cachep->align; 2290 cachep->colour_off = cachep->align;
2373 cachep->colour = left_over / cachep->colour_off; 2291 cachep->colour = left_over / cachep->colour_off;
2374 cachep->slab_size = slab_size; 2292 cachep->freelist_size = freelist_size;
2375 cachep->flags = flags; 2293 cachep->flags = flags;
2376 cachep->allocflags = 0; 2294 cachep->allocflags = __GFP_COMP;
2377 if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA)) 2295 if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
2378 cachep->allocflags |= GFP_DMA; 2296 cachep->allocflags |= GFP_DMA;
2379 cachep->size = size; 2297 cachep->size = size;
2380 cachep->reciprocal_buffer_size = reciprocal_value(size); 2298 cachep->reciprocal_buffer_size = reciprocal_value(size);
2381 2299
2382 if (flags & CFLGS_OFF_SLAB) { 2300 if (flags & CFLGS_OFF_SLAB) {
2383 cachep->slabp_cache = kmalloc_slab(slab_size, 0u); 2301 cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
2384 /* 2302 /*
2385 * This is a possibility for one of the malloc_sizes caches. 2303 * This is a possibility for one of the malloc_sizes caches.
2386 * But since we go off slab only for object size greater than 2304 * But since we go off slab only for object size greater than
@@ -2388,7 +2306,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2388 * this should not happen at all. 2306 * this should not happen at all.
2389 * But leave a BUG_ON for some lucky dude. 2307 * But leave a BUG_ON for some lucky dude.
2390 */ 2308 */
2391 BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache)); 2309 BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache));
2392 } 2310 }
2393 2311
2394 err = setup_cpu_cache(cachep, gfp); 2312 err = setup_cpu_cache(cachep, gfp);
@@ -2494,7 +2412,7 @@ static int drain_freelist(struct kmem_cache *cache,
2494{ 2412{
2495 struct list_head *p; 2413 struct list_head *p;
2496 int nr_freed; 2414 int nr_freed;
2497 struct slab *slabp; 2415 struct page *page;
2498 2416
2499 nr_freed = 0; 2417 nr_freed = 0;
2500 while (nr_freed < tofree && !list_empty(&n->slabs_free)) { 2418 while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
@@ -2506,18 +2424,18 @@ static int drain_freelist(struct kmem_cache *cache,
2506 goto out; 2424 goto out;
2507 } 2425 }
2508 2426
2509 slabp = list_entry(p, struct slab, list); 2427 page = list_entry(p, struct page, lru);
2510#if DEBUG 2428#if DEBUG
2511 BUG_ON(slabp->inuse); 2429 BUG_ON(page->active);
2512#endif 2430#endif
2513 list_del(&slabp->list); 2431 list_del(&page->lru);
2514 /* 2432 /*
2515 * Safe to drop the lock. The slab is no longer linked 2433 * Safe to drop the lock. The slab is no longer linked
2516 * to the cache. 2434 * to the cache.
2517 */ 2435 */
2518 n->free_objects -= cache->num; 2436 n->free_objects -= cache->num;
2519 spin_unlock_irq(&n->list_lock); 2437 spin_unlock_irq(&n->list_lock);
2520 slab_destroy(cache, slabp); 2438 slab_destroy(cache, page);
2521 nr_freed++; 2439 nr_freed++;
2522 } 2440 }
2523out: 2441out:
@@ -2600,52 +2518,42 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
2600 * descriptors in kmem_cache_create, we search through the malloc_sizes array. 2518 * descriptors in kmem_cache_create, we search through the malloc_sizes array.
2601 * If we are creating a malloc_sizes cache here it would not be visible to 2519 * If we are creating a malloc_sizes cache here it would not be visible to
2602 * kmem_find_general_cachep till the initialization is complete. 2520 * kmem_find_general_cachep till the initialization is complete.
2603 * Hence we cannot have slabp_cache same as the original cache. 2521 * Hence we cannot have freelist_cache same as the original cache.
2604 */ 2522 */
2605static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, 2523static void *alloc_slabmgmt(struct kmem_cache *cachep,
2606 int colour_off, gfp_t local_flags, 2524 struct page *page, int colour_off,
2607 int nodeid) 2525 gfp_t local_flags, int nodeid)
2608{ 2526{
2609 struct slab *slabp; 2527 void *freelist;
2528 void *addr = page_address(page);
2610 2529
2611 if (OFF_SLAB(cachep)) { 2530 if (OFF_SLAB(cachep)) {
2612 /* Slab management obj is off-slab. */ 2531 /* Slab management obj is off-slab. */
2613 slabp = kmem_cache_alloc_node(cachep->slabp_cache, 2532 freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2614 local_flags, nodeid); 2533 local_flags, nodeid);
2615 /* 2534 if (!freelist)
2616 * If the first object in the slab is leaked (it's allocated
2617 * but no one has a reference to it), we want to make sure
2618 * kmemleak does not treat the ->s_mem pointer as a reference
2619 * to the object. Otherwise we will not report the leak.
2620 */
2621 kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
2622 local_flags);
2623 if (!slabp)
2624 return NULL; 2535 return NULL;
2625 } else { 2536 } else {
2626 slabp = objp + colour_off; 2537 freelist = addr + colour_off;
2627 colour_off += cachep->slab_size; 2538 colour_off += cachep->freelist_size;
2628 } 2539 }
2629 slabp->inuse = 0; 2540 page->active = 0;
2630 slabp->colouroff = colour_off; 2541 page->s_mem = addr + colour_off;
2631 slabp->s_mem = objp + colour_off; 2542 return freelist;
2632 slabp->nodeid = nodeid;
2633 slabp->free = 0;
2634 return slabp;
2635} 2543}
2636 2544
2637static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp) 2545static inline unsigned int *slab_freelist(struct page *page)
2638{ 2546{
2639 return (kmem_bufctl_t *) (slabp + 1); 2547 return (unsigned int *)(page->freelist);
2640} 2548}
2641 2549
2642static void cache_init_objs(struct kmem_cache *cachep, 2550static void cache_init_objs(struct kmem_cache *cachep,
2643 struct slab *slabp) 2551 struct page *page)
2644{ 2552{
2645 int i; 2553 int i;
2646 2554
2647 for (i = 0; i < cachep->num; i++) { 2555 for (i = 0; i < cachep->num; i++) {
2648 void *objp = index_to_obj(cachep, slabp, i); 2556 void *objp = index_to_obj(cachep, page, i);
2649#if DEBUG 2557#if DEBUG
2650 /* need to poison the objs? */ 2558 /* need to poison the objs? */
2651 if (cachep->flags & SLAB_POISON) 2559 if (cachep->flags & SLAB_POISON)
@@ -2681,9 +2589,8 @@ static void cache_init_objs(struct kmem_cache *cachep,
2681 if (cachep->ctor) 2589 if (cachep->ctor)
2682 cachep->ctor(objp); 2590 cachep->ctor(objp);
2683#endif 2591#endif
2684 slab_bufctl(slabp)[i] = i + 1; 2592 slab_freelist(page)[i] = i;
2685 } 2593 }
2686 slab_bufctl(slabp)[i - 1] = BUFCTL_END;
2687} 2594}
2688 2595
2689static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) 2596static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
@@ -2696,41 +2603,41 @@ static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
2696 } 2603 }
2697} 2604}
2698 2605
2699static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, 2606static void *slab_get_obj(struct kmem_cache *cachep, struct page *page,
2700 int nodeid) 2607 int nodeid)
2701{ 2608{
2702 void *objp = index_to_obj(cachep, slabp, slabp->free); 2609 void *objp;
2703 kmem_bufctl_t next;
2704 2610
2705 slabp->inuse++; 2611 objp = index_to_obj(cachep, page, slab_freelist(page)[page->active]);
2706 next = slab_bufctl(slabp)[slabp->free]; 2612 page->active++;
2707#if DEBUG 2613#if DEBUG
2708 slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; 2614 WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
2709 WARN_ON(slabp->nodeid != nodeid);
2710#endif 2615#endif
2711 slabp->free = next;
2712 2616
2713 return objp; 2617 return objp;
2714} 2618}
2715 2619
2716static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, 2620static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
2717 void *objp, int nodeid) 2621 void *objp, int nodeid)
2718{ 2622{
2719 unsigned int objnr = obj_to_index(cachep, slabp, objp); 2623 unsigned int objnr = obj_to_index(cachep, page, objp);
2720
2721#if DEBUG 2624#if DEBUG
2625 unsigned int i;
2626
2722 /* Verify that the slab belongs to the intended node */ 2627 /* Verify that the slab belongs to the intended node */
2723 WARN_ON(slabp->nodeid != nodeid); 2628 WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
2724 2629
2725 if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) { 2630 /* Verify double free bug */
2726 printk(KERN_ERR "slab: double free detected in cache " 2631 for (i = page->active; i < cachep->num; i++) {
2727 "'%s', objp %p\n", cachep->name, objp); 2632 if (slab_freelist(page)[i] == objnr) {
2728 BUG(); 2633 printk(KERN_ERR "slab: double free detected in cache "
2634 "'%s', objp %p\n", cachep->name, objp);
2635 BUG();
2636 }
2729 } 2637 }
2730#endif 2638#endif
2731 slab_bufctl(slabp)[objnr] = slabp->free; 2639 page->active--;
2732 slabp->free = objnr; 2640 slab_freelist(page)[page->active] = objnr;
2733 slabp->inuse--;
2734} 2641}
2735 2642
2736/* 2643/*
@@ -2738,23 +2645,11 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2738 * for the slab allocator to be able to lookup the cache and slab of a 2645 * for the slab allocator to be able to lookup the cache and slab of a
2739 * virtual address for kfree, ksize, and slab debugging. 2646 * virtual address for kfree, ksize, and slab debugging.
2740 */ 2647 */
2741static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, 2648static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2742 void *addr) 2649 void *freelist)
2743{ 2650{
2744 int nr_pages; 2651 page->slab_cache = cache;
2745 struct page *page; 2652 page->freelist = freelist;
2746
2747 page = virt_to_page(addr);
2748
2749 nr_pages = 1;
2750 if (likely(!PageCompound(page)))
2751 nr_pages <<= cache->gfporder;
2752
2753 do {
2754 page->slab_cache = cache;
2755 page->slab_page = slab;
2756 page++;
2757 } while (--nr_pages);
2758} 2653}
2759 2654
2760/* 2655/*
@@ -2762,9 +2657,9 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
2762 * kmem_cache_alloc() when there are no active objs left in a cache. 2657 * kmem_cache_alloc() when there are no active objs left in a cache.
2763 */ 2658 */
2764static int cache_grow(struct kmem_cache *cachep, 2659static int cache_grow(struct kmem_cache *cachep,
2765 gfp_t flags, int nodeid, void *objp) 2660 gfp_t flags, int nodeid, struct page *page)
2766{ 2661{
2767 struct slab *slabp; 2662 void *freelist;
2768 size_t offset; 2663 size_t offset;
2769 gfp_t local_flags; 2664 gfp_t local_flags;
2770 struct kmem_cache_node *n; 2665 struct kmem_cache_node *n;
@@ -2805,20 +2700,20 @@ static int cache_grow(struct kmem_cache *cachep,
2805 * Get mem for the objs. Attempt to allocate a physical page from 2700 * Get mem for the objs. Attempt to allocate a physical page from
2806 * 'nodeid'. 2701 * 'nodeid'.
2807 */ 2702 */
2808 if (!objp) 2703 if (!page)
2809 objp = kmem_getpages(cachep, local_flags, nodeid); 2704 page = kmem_getpages(cachep, local_flags, nodeid);
2810 if (!objp) 2705 if (!page)
2811 goto failed; 2706 goto failed;
2812 2707
2813 /* Get slab management. */ 2708 /* Get slab management. */
2814 slabp = alloc_slabmgmt(cachep, objp, offset, 2709 freelist = alloc_slabmgmt(cachep, page, offset,
2815 local_flags & ~GFP_CONSTRAINT_MASK, nodeid); 2710 local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
2816 if (!slabp) 2711 if (!freelist)
2817 goto opps1; 2712 goto opps1;
2818 2713
2819 slab_map_pages(cachep, slabp, objp); 2714 slab_map_pages(cachep, page, freelist);
2820 2715
2821 cache_init_objs(cachep, slabp); 2716 cache_init_objs(cachep, page);
2822 2717
2823 if (local_flags & __GFP_WAIT) 2718 if (local_flags & __GFP_WAIT)
2824 local_irq_disable(); 2719 local_irq_disable();
@@ -2826,13 +2721,13 @@ static int cache_grow(struct kmem_cache *cachep,
2826 spin_lock(&n->list_lock); 2721 spin_lock(&n->list_lock);
2827 2722
2828 /* Make slab active. */ 2723 /* Make slab active. */
2829 list_add_tail(&slabp->list, &(n->slabs_free)); 2724 list_add_tail(&page->lru, &(n->slabs_free));
2830 STATS_INC_GROWN(cachep); 2725 STATS_INC_GROWN(cachep);
2831 n->free_objects += cachep->num; 2726 n->free_objects += cachep->num;
2832 spin_unlock(&n->list_lock); 2727 spin_unlock(&n->list_lock);
2833 return 1; 2728 return 1;
2834opps1: 2729opps1:
2835 kmem_freepages(cachep, objp); 2730 kmem_freepages(cachep, page);
2836failed: 2731failed:
2837 if (local_flags & __GFP_WAIT) 2732 if (local_flags & __GFP_WAIT)
2838 local_irq_disable(); 2733 local_irq_disable();
@@ -2880,9 +2775,8 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2880static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, 2775static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2881 unsigned long caller) 2776 unsigned long caller)
2882{ 2777{
2883 struct page *page;
2884 unsigned int objnr; 2778 unsigned int objnr;
2885 struct slab *slabp; 2779 struct page *page;
2886 2780
2887 BUG_ON(virt_to_cache(objp) != cachep); 2781 BUG_ON(virt_to_cache(objp) != cachep);
2888 2782
@@ -2890,8 +2784,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2890 kfree_debugcheck(objp); 2784 kfree_debugcheck(objp);
2891 page = virt_to_head_page(objp); 2785 page = virt_to_head_page(objp);
2892 2786
2893 slabp = page->slab_page;
2894
2895 if (cachep->flags & SLAB_RED_ZONE) { 2787 if (cachep->flags & SLAB_RED_ZONE) {
2896 verify_redzone_free(cachep, objp); 2788 verify_redzone_free(cachep, objp);
2897 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2789 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
@@ -2900,14 +2792,11 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2900 if (cachep->flags & SLAB_STORE_USER) 2792 if (cachep->flags & SLAB_STORE_USER)
2901 *dbg_userword(cachep, objp) = (void *)caller; 2793 *dbg_userword(cachep, objp) = (void *)caller;
2902 2794
2903 objnr = obj_to_index(cachep, slabp, objp); 2795 objnr = obj_to_index(cachep, page, objp);
2904 2796
2905 BUG_ON(objnr >= cachep->num); 2797 BUG_ON(objnr >= cachep->num);
2906 BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); 2798 BUG_ON(objp != index_to_obj(cachep, page, objnr));
2907 2799
2908#ifdef CONFIG_DEBUG_SLAB_LEAK
2909 slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
2910#endif
2911 if (cachep->flags & SLAB_POISON) { 2800 if (cachep->flags & SLAB_POISON) {
2912#ifdef CONFIG_DEBUG_PAGEALLOC 2801#ifdef CONFIG_DEBUG_PAGEALLOC
2913 if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { 2802 if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
@@ -2924,33 +2813,9 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2924 return objp; 2813 return objp;
2925} 2814}
2926 2815
2927static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
2928{
2929 kmem_bufctl_t i;
2930 int entries = 0;
2931
2932 /* Check slab's freelist to see if this obj is there. */
2933 for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
2934 entries++;
2935 if (entries > cachep->num || i >= cachep->num)
2936 goto bad;
2937 }
2938 if (entries != cachep->num - slabp->inuse) {
2939bad:
2940 printk(KERN_ERR "slab: Internal list corruption detected in "
2941 "cache '%s'(%d), slabp %p(%d). Tainted(%s). Hexdump:\n",
2942 cachep->name, cachep->num, slabp, slabp->inuse,
2943 print_tainted());
2944 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, slabp,
2945 sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t),
2946 1);
2947 BUG();
2948 }
2949}
2950#else 2816#else
2951#define kfree_debugcheck(x) do { } while(0) 2817#define kfree_debugcheck(x) do { } while(0)
2952#define cache_free_debugcheck(x,objp,z) (objp) 2818#define cache_free_debugcheck(x,objp,z) (objp)
2953#define check_slabp(x,y) do { } while(0)
2954#endif 2819#endif
2955 2820
2956static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags, 2821static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
@@ -2989,7 +2854,7 @@ retry:
2989 2854
2990 while (batchcount > 0) { 2855 while (batchcount > 0) {
2991 struct list_head *entry; 2856 struct list_head *entry;
2992 struct slab *slabp; 2857 struct page *page;
2993 /* Get slab alloc is to come from. */ 2858 /* Get slab alloc is to come from. */
2994 entry = n->slabs_partial.next; 2859 entry = n->slabs_partial.next;
2995 if (entry == &n->slabs_partial) { 2860 if (entry == &n->slabs_partial) {
@@ -2999,8 +2864,7 @@ retry:
2999 goto must_grow; 2864 goto must_grow;
3000 } 2865 }
3001 2866
3002 slabp = list_entry(entry, struct slab, list); 2867 page = list_entry(entry, struct page, lru);
3003 check_slabp(cachep, slabp);
3004 check_spinlock_acquired(cachep); 2868 check_spinlock_acquired(cachep);
3005 2869
3006 /* 2870 /*
@@ -3008,24 +2872,23 @@ retry:
3008 * there must be at least one object available for 2872 * there must be at least one object available for
3009 * allocation. 2873 * allocation.
3010 */ 2874 */
3011 BUG_ON(slabp->inuse >= cachep->num); 2875 BUG_ON(page->active >= cachep->num);
3012 2876
3013 while (slabp->inuse < cachep->num && batchcount--) { 2877 while (page->active < cachep->num && batchcount--) {
3014 STATS_INC_ALLOCED(cachep); 2878 STATS_INC_ALLOCED(cachep);
3015 STATS_INC_ACTIVE(cachep); 2879 STATS_INC_ACTIVE(cachep);
3016 STATS_SET_HIGH(cachep); 2880 STATS_SET_HIGH(cachep);
3017 2881
3018 ac_put_obj(cachep, ac, slab_get_obj(cachep, slabp, 2882 ac_put_obj(cachep, ac, slab_get_obj(cachep, page,
3019 node)); 2883 node));
3020 } 2884 }
3021 check_slabp(cachep, slabp);
3022 2885
3023 /* move slabp to correct slabp list: */ 2886 /* move slabp to correct slabp list: */
3024 list_del(&slabp->list); 2887 list_del(&page->lru);
3025 if (slabp->free == BUFCTL_END) 2888 if (page->active == cachep->num)
3026 list_add(&slabp->list, &n->slabs_full); 2889 list_add(&page->list, &n->slabs_full);
3027 else 2890 else
3028 list_add(&slabp->list, &n->slabs_partial); 2891 list_add(&page->list, &n->slabs_partial);
3029 } 2892 }
3030 2893
3031must_grow: 2894must_grow:
@@ -3097,16 +2960,6 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3097 *dbg_redzone1(cachep, objp) = RED_ACTIVE; 2960 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
3098 *dbg_redzone2(cachep, objp) = RED_ACTIVE; 2961 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
3099 } 2962 }
3100#ifdef CONFIG_DEBUG_SLAB_LEAK
3101 {
3102 struct slab *slabp;
3103 unsigned objnr;
3104
3105 slabp = virt_to_head_page(objp)->slab_page;
3106 objnr = (unsigned)(objp - slabp->s_mem) / cachep->size;
3107 slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
3108 }
3109#endif
3110 objp += obj_offset(cachep); 2963 objp += obj_offset(cachep);
3111 if (cachep->ctor && cachep->flags & SLAB_POISON) 2964 if (cachep->ctor && cachep->flags & SLAB_POISON)
3112 cachep->ctor(objp); 2965 cachep->ctor(objp);
@@ -3248,18 +3101,20 @@ retry:
3248 * We may trigger various forms of reclaim on the allowed 3101 * We may trigger various forms of reclaim on the allowed
3249 * set and go into memory reserves if necessary. 3102 * set and go into memory reserves if necessary.
3250 */ 3103 */
3104 struct page *page;
3105
3251 if (local_flags & __GFP_WAIT) 3106 if (local_flags & __GFP_WAIT)
3252 local_irq_enable(); 3107 local_irq_enable();
3253 kmem_flagcheck(cache, flags); 3108 kmem_flagcheck(cache, flags);
3254 obj = kmem_getpages(cache, local_flags, numa_mem_id()); 3109 page = kmem_getpages(cache, local_flags, numa_mem_id());
3255 if (local_flags & __GFP_WAIT) 3110 if (local_flags & __GFP_WAIT)
3256 local_irq_disable(); 3111 local_irq_disable();
3257 if (obj) { 3112 if (page) {
3258 /* 3113 /*
3259 * Insert into the appropriate per node queues 3114 * Insert into the appropriate per node queues
3260 */ 3115 */
3261 nid = page_to_nid(virt_to_page(obj)); 3116 nid = page_to_nid(page);
3262 if (cache_grow(cache, flags, nid, obj)) { 3117 if (cache_grow(cache, flags, nid, page)) {
3263 obj = ____cache_alloc_node(cache, 3118 obj = ____cache_alloc_node(cache,
3264 flags | GFP_THISNODE, nid); 3119 flags | GFP_THISNODE, nid);
3265 if (!obj) 3120 if (!obj)
@@ -3288,7 +3143,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3288 int nodeid) 3143 int nodeid)
3289{ 3144{
3290 struct list_head *entry; 3145 struct list_head *entry;
3291 struct slab *slabp; 3146 struct page *page;
3292 struct kmem_cache_node *n; 3147 struct kmem_cache_node *n;
3293 void *obj; 3148 void *obj;
3294 int x; 3149 int x;
@@ -3308,26 +3163,24 @@ retry:
3308 goto must_grow; 3163 goto must_grow;
3309 } 3164 }
3310 3165
3311 slabp = list_entry(entry, struct slab, list); 3166 page = list_entry(entry, struct page, lru);
3312 check_spinlock_acquired_node(cachep, nodeid); 3167 check_spinlock_acquired_node(cachep, nodeid);
3313 check_slabp(cachep, slabp);
3314 3168
3315 STATS_INC_NODEALLOCS(cachep); 3169 STATS_INC_NODEALLOCS(cachep);
3316 STATS_INC_ACTIVE(cachep); 3170 STATS_INC_ACTIVE(cachep);
3317 STATS_SET_HIGH(cachep); 3171 STATS_SET_HIGH(cachep);
3318 3172
3319 BUG_ON(slabp->inuse == cachep->num); 3173 BUG_ON(page->active == cachep->num);
3320 3174
3321 obj = slab_get_obj(cachep, slabp, nodeid); 3175 obj = slab_get_obj(cachep, page, nodeid);
3322 check_slabp(cachep, slabp);
3323 n->free_objects--; 3176 n->free_objects--;
3324 /* move slabp to correct slabp list: */ 3177 /* move slabp to correct slabp list: */
3325 list_del(&slabp->list); 3178 list_del(&page->lru);
3326 3179
3327 if (slabp->free == BUFCTL_END) 3180 if (page->active == cachep->num)
3328 list_add(&slabp->list, &n->slabs_full); 3181 list_add(&page->lru, &n->slabs_full);
3329 else 3182 else
3330 list_add(&slabp->list, &n->slabs_partial); 3183 list_add(&page->lru, &n->slabs_partial);
3331 3184
3332 spin_unlock(&n->list_lock); 3185 spin_unlock(&n->list_lock);
3333 goto done; 3186 goto done;
@@ -3477,23 +3330,21 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3477 3330
3478 for (i = 0; i < nr_objects; i++) { 3331 for (i = 0; i < nr_objects; i++) {
3479 void *objp; 3332 void *objp;
3480 struct slab *slabp; 3333 struct page *page;
3481 3334
3482 clear_obj_pfmemalloc(&objpp[i]); 3335 clear_obj_pfmemalloc(&objpp[i]);
3483 objp = objpp[i]; 3336 objp = objpp[i];
3484 3337
3485 slabp = virt_to_slab(objp); 3338 page = virt_to_head_page(objp);
3486 n = cachep->node[node]; 3339 n = cachep->node[node];
3487 list_del(&slabp->list); 3340 list_del(&page->lru);
3488 check_spinlock_acquired_node(cachep, node); 3341 check_spinlock_acquired_node(cachep, node);
3489 check_slabp(cachep, slabp); 3342 slab_put_obj(cachep, page, objp, node);
3490 slab_put_obj(cachep, slabp, objp, node);
3491 STATS_DEC_ACTIVE(cachep); 3343 STATS_DEC_ACTIVE(cachep);
3492 n->free_objects++; 3344 n->free_objects++;
3493 check_slabp(cachep, slabp);
3494 3345
3495 /* fixup slab chains */ 3346 /* fixup slab chains */
3496 if (slabp->inuse == 0) { 3347 if (page->active == 0) {
3497 if (n->free_objects > n->free_limit) { 3348 if (n->free_objects > n->free_limit) {
3498 n->free_objects -= cachep->num; 3349 n->free_objects -= cachep->num;
3499 /* No need to drop any previously held 3350 /* No need to drop any previously held
@@ -3502,16 +3353,16 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3502 * a different cache, refer to comments before 3353 * a different cache, refer to comments before
3503 * alloc_slabmgmt. 3354 * alloc_slabmgmt.
3504 */ 3355 */
3505 slab_destroy(cachep, slabp); 3356 slab_destroy(cachep, page);
3506 } else { 3357 } else {
3507 list_add(&slabp->list, &n->slabs_free); 3358 list_add(&page->lru, &n->slabs_free);
3508 } 3359 }
3509 } else { 3360 } else {
3510 /* Unconditionally move a slab to the end of the 3361 /* Unconditionally move a slab to the end of the
3511 * partial list on free - maximum time for the 3362 * partial list on free - maximum time for the
3512 * other objects to be freed, too. 3363 * other objects to be freed, too.
3513 */ 3364 */
3514 list_add_tail(&slabp->list, &n->slabs_partial); 3365 list_add_tail(&page->lru, &n->slabs_partial);
3515 } 3366 }
3516 } 3367 }
3517} 3368}
@@ -3551,10 +3402,10 @@ free_done:
3551 3402
3552 p = n->slabs_free.next; 3403 p = n->slabs_free.next;
3553 while (p != &(n->slabs_free)) { 3404 while (p != &(n->slabs_free)) {
3554 struct slab *slabp; 3405 struct page *page;
3555 3406
3556 slabp = list_entry(p, struct slab, list); 3407 page = list_entry(p, struct page, lru);
3557 BUG_ON(slabp->inuse); 3408 BUG_ON(page->active);
3558 3409
3559 i++; 3410 i++;
3560 p = p->next; 3411 p = p->next;
@@ -4158,7 +4009,7 @@ out:
4158#ifdef CONFIG_SLABINFO 4009#ifdef CONFIG_SLABINFO
4159void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) 4010void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
4160{ 4011{
4161 struct slab *slabp; 4012 struct page *page;
4162 unsigned long active_objs; 4013 unsigned long active_objs;
4163 unsigned long num_objs; 4014 unsigned long num_objs;
4164 unsigned long active_slabs = 0; 4015 unsigned long active_slabs = 0;
@@ -4178,23 +4029,23 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
4178 check_irq_on(); 4029 check_irq_on();
4179 spin_lock_irq(&n->list_lock); 4030 spin_lock_irq(&n->list_lock);
4180 4031
4181 list_for_each_entry(slabp, &n->slabs_full, list) { 4032 list_for_each_entry(page, &n->slabs_full, lru) {
4182 if (slabp->inuse != cachep->num && !error) 4033 if (page->active != cachep->num && !error)
4183 error = "slabs_full accounting error"; 4034 error = "slabs_full accounting error";
4184 active_objs += cachep->num; 4035 active_objs += cachep->num;
4185 active_slabs++; 4036 active_slabs++;
4186 } 4037 }
4187 list_for_each_entry(slabp, &n->slabs_partial, list) { 4038 list_for_each_entry(page, &n->slabs_partial, lru) {
4188 if (slabp->inuse == cachep->num && !error) 4039 if (page->active == cachep->num && !error)
4189 error = "slabs_partial inuse accounting error"; 4040 error = "slabs_partial accounting error";
4190 if (!slabp->inuse && !error) 4041 if (!page->active && !error)
4191 error = "slabs_partial/inuse accounting error"; 4042 error = "slabs_partial accounting error";
4192 active_objs += slabp->inuse; 4043 active_objs += page->active;
4193 active_slabs++; 4044 active_slabs++;
4194 } 4045 }
4195 list_for_each_entry(slabp, &n->slabs_free, list) { 4046 list_for_each_entry(page, &n->slabs_free, lru) {
4196 if (slabp->inuse && !error) 4047 if (page->active && !error)
4197 error = "slabs_free/inuse accounting error"; 4048 error = "slabs_free accounting error";
4198 num_slabs++; 4049 num_slabs++;
4199 } 4050 }
4200 free_objects += n->free_objects; 4051 free_objects += n->free_objects;
@@ -4346,15 +4197,27 @@ static inline int add_caller(unsigned long *n, unsigned long v)
4346 return 1; 4197 return 1;
4347} 4198}
4348 4199
4349static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s) 4200static void handle_slab(unsigned long *n, struct kmem_cache *c,
4201 struct page *page)
4350{ 4202{
4351 void *p; 4203 void *p;
4352 int i; 4204 int i, j;
4205
4353 if (n[0] == n[1]) 4206 if (n[0] == n[1])
4354 return; 4207 return;
4355 for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) { 4208 for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
4356 if (slab_bufctl(s)[i] != BUFCTL_ACTIVE) 4209 bool active = true;
4210
4211 for (j = page->active; j < c->num; j++) {
4212 /* Skip freed item */
4213 if (slab_freelist(page)[j] == i) {
4214 active = false;
4215 break;
4216 }
4217 }
4218 if (!active)
4357 continue; 4219 continue;
4220
4358 if (!add_caller(n, (unsigned long)*dbg_userword(c, p))) 4221 if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4359 return; 4222 return;
4360 } 4223 }
@@ -4379,7 +4242,7 @@ static void show_symbol(struct seq_file *m, unsigned long address)
4379static int leaks_show(struct seq_file *m, void *p) 4242static int leaks_show(struct seq_file *m, void *p)
4380{ 4243{
4381 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); 4244 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
4382 struct slab *slabp; 4245 struct page *page;
4383 struct kmem_cache_node *n; 4246 struct kmem_cache_node *n;
4384 const char *name; 4247 const char *name;
4385 unsigned long *x = m->private; 4248 unsigned long *x = m->private;
@@ -4403,10 +4266,10 @@ static int leaks_show(struct seq_file *m, void *p)
4403 check_irq_on(); 4266 check_irq_on();
4404 spin_lock_irq(&n->list_lock); 4267 spin_lock_irq(&n->list_lock);
4405 4268
4406 list_for_each_entry(slabp, &n->slabs_full, list) 4269 list_for_each_entry(page, &n->slabs_full, lru)
4407 handle_slab(x, cachep, slabp); 4270 handle_slab(x, cachep, page);
4408 list_for_each_entry(slabp, &n->slabs_partial, list) 4271 list_for_each_entry(page, &n->slabs_partial, lru)
4409 handle_slab(x, cachep, slabp); 4272 handle_slab(x, cachep, page);
4410 spin_unlock_irq(&n->list_lock); 4273 spin_unlock_irq(&n->list_lock);
4411 } 4274 }
4412 name = cachep->name; 4275 name = cachep->name;
diff --git a/mm/slub.c b/mm/slub.c
index 7e8bd8d828bc..545a170ebf9f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -155,7 +155,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
155/* 155/*
156 * Maximum number of desirable partial slabs. 156 * Maximum number of desirable partial slabs.
157 * The existence of more partial slabs makes kmem_cache_shrink 157 * The existence of more partial slabs makes kmem_cache_shrink
158 * sort the partial list by the number of objects in the. 158 * sort the partial list by the number of objects in use.
159 */ 159 */
160#define MAX_PARTIAL 10 160#define MAX_PARTIAL 10
161 161
@@ -933,6 +933,16 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
933 * Hooks for other subsystems that check memory allocations. In a typical 933 * Hooks for other subsystems that check memory allocations. In a typical
934 * production configuration these hooks all should produce no code at all. 934 * production configuration these hooks all should produce no code at all.
935 */ 935 */
936static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
937{
938 kmemleak_alloc(ptr, size, 1, flags);
939}
940
941static inline void kfree_hook(const void *x)
942{
943 kmemleak_free(x);
944}
945
936static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) 946static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
937{ 947{
938 flags &= gfp_allowed_mask; 948 flags &= gfp_allowed_mask;
@@ -1217,8 +1227,8 @@ static unsigned long kmem_cache_flags(unsigned long object_size,
1217 /* 1227 /*
1218 * Enable debugging if selected on the kernel commandline. 1228 * Enable debugging if selected on the kernel commandline.
1219 */ 1229 */
1220 if (slub_debug && (!slub_debug_slabs || 1230 if (slub_debug && (!slub_debug_slabs || (name &&
1221 !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))) 1231 !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
1222 flags |= slub_debug; 1232 flags |= slub_debug;
1223 1233
1224 return flags; 1234 return flags;
@@ -1260,13 +1270,30 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
1260static inline void dec_slabs_node(struct kmem_cache *s, int node, 1270static inline void dec_slabs_node(struct kmem_cache *s, int node,
1261 int objects) {} 1271 int objects) {}
1262 1272
1273static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
1274{
1275 kmemleak_alloc(ptr, size, 1, flags);
1276}
1277
1278static inline void kfree_hook(const void *x)
1279{
1280 kmemleak_free(x);
1281}
1282
1263static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) 1283static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
1264 { return 0; } 1284 { return 0; }
1265 1285
1266static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, 1286static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
1267 void *object) {} 1287 void *object)
1288{
1289 kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
1290 flags & gfp_allowed_mask);
1291}
1268 1292
1269static inline void slab_free_hook(struct kmem_cache *s, void *x) {} 1293static inline void slab_free_hook(struct kmem_cache *s, void *x)
1294{
1295 kmemleak_free_recursive(x, s->flags);
1296}
1270 1297
1271#endif /* CONFIG_SLUB_DEBUG */ 1298#endif /* CONFIG_SLUB_DEBUG */
1272 1299
@@ -2829,8 +2856,8 @@ static struct kmem_cache *kmem_cache_node;
2829 * slab on the node for this slabcache. There are no concurrent accesses 2856 * slab on the node for this slabcache. There are no concurrent accesses
2830 * possible. 2857 * possible.
2831 * 2858 *
2832 * Note that this function only works on the kmalloc_node_cache 2859 * Note that this function only works on the kmem_cache_node
2833 * when allocating for the kmalloc_node_cache. This is used for bootstrapping 2860 * when allocating for the kmem_cache_node. This is used for bootstrapping
2834 * memory on a fresh node that has no slab structures yet. 2861 * memory on a fresh node that has no slab structures yet.
2835 */ 2862 */
2836static void early_kmem_cache_node_alloc(int node) 2863static void early_kmem_cache_node_alloc(int node)
@@ -3272,7 +3299,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3272 if (page) 3299 if (page)
3273 ptr = page_address(page); 3300 ptr = page_address(page);
3274 3301
3275 kmemleak_alloc(ptr, size, 1, flags); 3302 kmalloc_large_node_hook(ptr, size, flags);
3276 return ptr; 3303 return ptr;
3277} 3304}
3278 3305
@@ -3336,7 +3363,7 @@ void kfree(const void *x)
3336 page = virt_to_head_page(x); 3363 page = virt_to_head_page(x);
3337 if (unlikely(!PageSlab(page))) { 3364 if (unlikely(!PageSlab(page))) {
3338 BUG_ON(!PageCompound(page)); 3365 BUG_ON(!PageCompound(page));
3339 kmemleak_free(x); 3366 kfree_hook(x);
3340 __free_memcg_kmem_pages(page, compound_order(page)); 3367 __free_memcg_kmem_pages(page, compound_order(page));
3341 return; 3368 return;
3342 } 3369 }
diff --git a/mm/swap.c b/mm/swap.c
index 7a9f80d451f5..84b26aaabd03 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -82,19 +82,6 @@ static void __put_compound_page(struct page *page)
82 82
83static void put_compound_page(struct page *page) 83static void put_compound_page(struct page *page)
84{ 84{
85 /*
86 * hugetlbfs pages cannot be split from under us. If this is a
87 * hugetlbfs page, check refcount on head page and release the page if
88 * the refcount becomes zero.
89 */
90 if (PageHuge(page)) {
91 page = compound_head(page);
92 if (put_page_testzero(page))
93 __put_compound_page(page);
94
95 return;
96 }
97
98 if (unlikely(PageTail(page))) { 85 if (unlikely(PageTail(page))) {
99 /* __split_huge_page_refcount can run under us */ 86 /* __split_huge_page_refcount can run under us */
100 struct page *page_head = compound_trans_head(page); 87 struct page *page_head = compound_trans_head(page);
@@ -111,14 +98,31 @@ static void put_compound_page(struct page *page)
111 * still hot on arches that do not support 98 * still hot on arches that do not support
112 * this_cpu_cmpxchg_double(). 99 * this_cpu_cmpxchg_double().
113 */ 100 */
114 if (PageSlab(page_head)) { 101 if (PageSlab(page_head) || PageHeadHuge(page_head)) {
115 if (PageTail(page)) { 102 if (likely(PageTail(page))) {
103 /*
104 * __split_huge_page_refcount
105 * cannot race here.
106 */
107 VM_BUG_ON(!PageHead(page_head));
108 atomic_dec(&page->_mapcount);
116 if (put_page_testzero(page_head)) 109 if (put_page_testzero(page_head))
117 VM_BUG_ON(1); 110 VM_BUG_ON(1);
118 111 if (put_page_testzero(page_head))
119 atomic_dec(&page->_mapcount); 112 __put_compound_page(page_head);
120 goto skip_lock_tail; 113 return;
121 } else 114 } else
115 /*
116 * __split_huge_page_refcount
117 * run before us, "page" was a
118 * THP tail. The split
119 * page_head has been freed
120 * and reallocated as slab or
121 * hugetlbfs page of smaller
122 * order (only possible if
123 * reallocated as slab on
124 * x86).
125 */
122 goto skip_lock; 126 goto skip_lock;
123 } 127 }
124 /* 128 /*
@@ -132,8 +136,27 @@ static void put_compound_page(struct page *page)
132 /* __split_huge_page_refcount run before us */ 136 /* __split_huge_page_refcount run before us */
133 compound_unlock_irqrestore(page_head, flags); 137 compound_unlock_irqrestore(page_head, flags);
134skip_lock: 138skip_lock:
135 if (put_page_testzero(page_head)) 139 if (put_page_testzero(page_head)) {
136 __put_single_page(page_head); 140 /*
141 * The head page may have been
142 * freed and reallocated as a
143 * compound page of smaller
144 * order and then freed again.
145 * All we know is that it
146 * cannot have become: a THP
147 * page, a compound page of
148 * higher order, a tail page.
149 * That is because we still
150 * hold the refcount of the
151 * split THP tail and
152 * page_head was the THP head
153 * before the split.
154 */
155 if (PageHead(page_head))
156 __put_compound_page(page_head);
157 else
158 __put_single_page(page_head);
159 }
137out_put_single: 160out_put_single:
138 if (put_page_testzero(page)) 161 if (put_page_testzero(page))
139 __put_single_page(page); 162 __put_single_page(page);
@@ -155,7 +178,6 @@ out_put_single:
155 VM_BUG_ON(atomic_read(&page->_count) != 0); 178 VM_BUG_ON(atomic_read(&page->_count) != 0);
156 compound_unlock_irqrestore(page_head, flags); 179 compound_unlock_irqrestore(page_head, flags);
157 180
158skip_lock_tail:
159 if (put_page_testzero(page_head)) { 181 if (put_page_testzero(page_head)) {
160 if (PageHead(page_head)) 182 if (PageHead(page_head))
161 __put_compound_page(page_head); 183 __put_compound_page(page_head);
@@ -198,51 +220,52 @@ bool __get_page_tail(struct page *page)
198 * proper PT lock that already serializes against 220 * proper PT lock that already serializes against
199 * split_huge_page(). 221 * split_huge_page().
200 */ 222 */
223 unsigned long flags;
201 bool got = false; 224 bool got = false;
202 struct page *page_head; 225 struct page *page_head = compound_trans_head(page);
203
204 /*
205 * If this is a hugetlbfs page it cannot be split under us. Simply
206 * increment refcount for the head page.
207 */
208 if (PageHuge(page)) {
209 page_head = compound_head(page);
210 atomic_inc(&page_head->_count);
211 got = true;
212 } else {
213 unsigned long flags;
214 226
215 page_head = compound_trans_head(page); 227 if (likely(page != page_head && get_page_unless_zero(page_head))) {
216 if (likely(page != page_head && 228 /* Ref to put_compound_page() comment. */
217 get_page_unless_zero(page_head))) { 229 if (PageSlab(page_head) || PageHeadHuge(page_head)) {
218
219 /* Ref to put_compound_page() comment. */
220 if (PageSlab(page_head)) {
221 if (likely(PageTail(page))) {
222 __get_page_tail_foll(page, false);
223 return true;
224 } else {
225 put_page(page_head);
226 return false;
227 }
228 }
229
230 /*
231 * page_head wasn't a dangling pointer but it
232 * may not be a head page anymore by the time
233 * we obtain the lock. That is ok as long as it
234 * can't be freed from under us.
235 */
236 flags = compound_lock_irqsave(page_head);
237 /* here __split_huge_page_refcount won't run anymore */
238 if (likely(PageTail(page))) { 230 if (likely(PageTail(page))) {
231 /*
232 * This is a hugetlbfs page or a slab
233 * page. __split_huge_page_refcount
234 * cannot race here.
235 */
236 VM_BUG_ON(!PageHead(page_head));
239 __get_page_tail_foll(page, false); 237 __get_page_tail_foll(page, false);
240 got = true; 238 return true;
241 } 239 } else {
242 compound_unlock_irqrestore(page_head, flags); 240 /*
243 if (unlikely(!got)) 241 * __split_huge_page_refcount run
242 * before us, "page" was a THP
243 * tail. The split page_head has been
244 * freed and reallocated as slab or
245 * hugetlbfs page of smaller order
246 * (only possible if reallocated as
247 * slab on x86).
248 */
244 put_page(page_head); 249 put_page(page_head);
250 return false;
251 }
252 }
253
254 /*
255 * page_head wasn't a dangling pointer but it
256 * may not be a head page anymore by the time
257 * we obtain the lock. That is ok as long as it
258 * can't be freed from under us.
259 */
260 flags = compound_lock_irqsave(page_head);
261 /* here __split_huge_page_refcount won't run anymore */
262 if (likely(PageTail(page))) {
263 __get_page_tail_foll(page, false);
264 got = true;
245 } 265 }
266 compound_unlock_irqrestore(page_head, flags);
267 if (unlikely(!got))
268 put_page(page_head);
246 } 269 }
247 return got; 270 return got;
248} 271}
diff --git a/net/Kconfig b/net/Kconfig
index 0715db64a5c3..d334678c0bd8 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -224,7 +224,7 @@ source "net/hsr/Kconfig"
224 224
225config RPS 225config RPS
226 boolean 226 boolean
227 depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS 227 depends on SMP && SYSFS
228 default y 228 default y
229 229
230config RFS_ACCEL 230config RFS_ACCEL
@@ -235,7 +235,7 @@ config RFS_ACCEL
235 235
236config XPS 236config XPS
237 boolean 237 boolean
238 depends on SMP && USE_GENERIC_SMP_HELPERS 238 depends on SMP
239 default y 239 default y
240 240
241config NETPRIO_CGROUP 241config NETPRIO_CGROUP
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 7fee50d637f9..7d424ac6e760 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1735,7 +1735,6 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1735 size_t size, int flags) 1735 size_t size, int flags)
1736{ 1736{
1737 struct sock *sk = sock->sk; 1737 struct sock *sk = sock->sk;
1738 struct sockaddr_at *sat = (struct sockaddr_at *)msg->msg_name;
1739 struct ddpehdr *ddp; 1738 struct ddpehdr *ddp;
1740 int copied = 0; 1739 int copied = 0;
1741 int offset = 0; 1740 int offset = 0;
@@ -1764,14 +1763,13 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1764 } 1763 }
1765 err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied); 1764 err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied);
1766 1765
1767 if (!err) { 1766 if (!err && msg->msg_name) {
1768 if (sat) { 1767 struct sockaddr_at *sat = msg->msg_name;
1769 sat->sat_family = AF_APPLETALK; 1768 sat->sat_family = AF_APPLETALK;
1770 sat->sat_port = ddp->deh_sport; 1769 sat->sat_port = ddp->deh_sport;
1771 sat->sat_addr.s_node = ddp->deh_snode; 1770 sat->sat_addr.s_node = ddp->deh_snode;
1772 sat->sat_addr.s_net = ddp->deh_snet; 1771 sat->sat_addr.s_net = ddp->deh_snet;
1773 } 1772 msg->msg_namelen = sizeof(*sat);
1774 msg->msg_namelen = sizeof(*sat);
1775 } 1773 }
1776 1774
1777 skb_free_datagram(sk, skb); /* Free the datagram. */ 1775 skb_free_datagram(sk, skb); /* Free the datagram. */
diff --git a/net/atm/common.c b/net/atm/common.c
index 737bef59ce89..7b491006eaf4 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -531,8 +531,6 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
531 struct sk_buff *skb; 531 struct sk_buff *skb;
532 int copied, error = -EINVAL; 532 int copied, error = -EINVAL;
533 533
534 msg->msg_namelen = 0;
535
536 if (sock->state != SS_CONNECTED) 534 if (sock->state != SS_CONNECTED)
537 return -ENOTCONN; 535 return -ENOTCONN;
538 536
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index a00123ebb0ae..7bb1605bdfd9 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1636,11 +1636,11 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
1636 1636
1637 skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); 1637 skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1638 1638
1639 if (msg->msg_namelen != 0) { 1639 if (msg->msg_name) {
1640 struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
1641 ax25_digi digi; 1640 ax25_digi digi;
1642 ax25_address src; 1641 ax25_address src;
1643 const unsigned char *mac = skb_mac_header(skb); 1642 const unsigned char *mac = skb_mac_header(skb);
1643 struct sockaddr_ax25 *sax = msg->msg_name;
1644 1644
1645 memset(sax, 0, sizeof(struct full_sockaddr_ax25)); 1645 memset(sax, 0, sizeof(struct full_sockaddr_ax25));
1646 ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL, 1646 ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index f6a1671ea2ff..56ca494621c6 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -224,10 +224,9 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
224 224
225 skb = skb_recv_datagram(sk, flags, noblock, &err); 225 skb = skb_recv_datagram(sk, flags, noblock, &err);
226 if (!skb) { 226 if (!skb) {
227 if (sk->sk_shutdown & RCV_SHUTDOWN) { 227 if (sk->sk_shutdown & RCV_SHUTDOWN)
228 msg->msg_namelen = 0;
229 return 0; 228 return 0;
230 } 229
231 return err; 230 return err;
232 } 231 }
233 232
@@ -245,8 +244,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
245 if (bt_sk(sk)->skb_msg_name) 244 if (bt_sk(sk)->skb_msg_name)
246 bt_sk(sk)->skb_msg_name(skb, msg->msg_name, 245 bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
247 &msg->msg_namelen); 246 &msg->msg_namelen);
248 else
249 msg->msg_namelen = 0;
250 } 247 }
251 248
252 skb_free_datagram(sk, skb); 249 skb_free_datagram(sk, skb);
@@ -295,8 +292,6 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
295 if (flags & MSG_OOB) 292 if (flags & MSG_OOB)
296 return -EOPNOTSUPP; 293 return -EOPNOTSUPP;
297 294
298 msg->msg_namelen = 0;
299
300 BT_DBG("sk %p size %zu", sk, size); 295 BT_DBG("sk %p size %zu", sk, size);
301 296
302 lock_sock(sk); 297 lock_sock(sk);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 71f0be173080..6a6c8bb4fd72 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -856,8 +856,6 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
856 if (!skb) 856 if (!skb)
857 return err; 857 return err;
858 858
859 msg->msg_namelen = 0;
860
861 copied = skb->len; 859 copied = skb->len;
862 if (len < copied) { 860 if (len < copied) {
863 msg->msg_flags |= MSG_TRUNC; 861 msg->msg_flags |= MSG_TRUNC;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 0cef67707838..4af3821df880 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -2439,6 +2439,9 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2439 int err; 2439 int err;
2440 struct sk_buff_head seg_queue; 2440 struct sk_buff_head seg_queue;
2441 2441
2442 if (!chan->conn)
2443 return -ENOTCONN;
2444
2442 /* Connectionless channel */ 2445 /* Connectionless channel */
2443 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { 2446 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2444 skb = l2cap_create_connless_pdu(chan, msg, len, priority); 2447 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 94d06cbfbc18..facd8a79c038 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -694,6 +694,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
694 addr.l2_family = AF_BLUETOOTH; 694 addr.l2_family = AF_BLUETOOTH;
695 addr.l2_psm = 0; 695 addr.l2_psm = 0;
696 addr.l2_cid = 0; 696 addr.l2_cid = 0;
697 addr.l2_bdaddr_type = BDADDR_BREDR;
697 *err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr)); 698 *err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
698 if (*err < 0) 699 if (*err < 0)
699 goto failed; 700 goto failed;
@@ -719,6 +720,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
719 addr.l2_family = AF_BLUETOOTH; 720 addr.l2_family = AF_BLUETOOTH;
720 addr.l2_psm = __constant_cpu_to_le16(RFCOMM_PSM); 721 addr.l2_psm = __constant_cpu_to_le16(RFCOMM_PSM);
721 addr.l2_cid = 0; 722 addr.l2_cid = 0;
723 addr.l2_bdaddr_type = BDADDR_BREDR;
722 *err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK); 724 *err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK);
723 if (*err == 0 || *err == -EINPROGRESS) 725 if (*err == 0 || *err == -EINPROGRESS)
724 return s; 726 return s;
@@ -1983,6 +1985,7 @@ static int rfcomm_add_listener(bdaddr_t *ba)
1983 addr.l2_family = AF_BLUETOOTH; 1985 addr.l2_family = AF_BLUETOOTH;
1984 addr.l2_psm = __constant_cpu_to_le16(RFCOMM_PSM); 1986 addr.l2_psm = __constant_cpu_to_le16(RFCOMM_PSM);
1985 addr.l2_cid = 0; 1987 addr.l2_cid = 0;
1988 addr.l2_bdaddr_type = BDADDR_BREDR;
1986 err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr)); 1989 err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
1987 if (err < 0) { 1990 if (err < 0) {
1988 BT_ERR("Bind failed %d", err); 1991 BT_ERR("Bind failed %d", err);
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index c4d3d423f89b..3c2d3e4aa2f5 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -615,7 +615,6 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
615 615
616 if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { 616 if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
617 rfcomm_dlc_accept(d); 617 rfcomm_dlc_accept(d);
618 msg->msg_namelen = 0;
619 return 0; 618 return 0;
620 } 619 }
621 620
@@ -739,8 +738,9 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
739static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) 738static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
740{ 739{
741 struct sock *sk = sock->sk; 740 struct sock *sk = sock->sk;
741 struct sock *l2cap_sk;
742 struct l2cap_conn *conn;
742 struct rfcomm_conninfo cinfo; 743 struct rfcomm_conninfo cinfo;
743 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
744 int len, err = 0; 744 int len, err = 0;
745 u32 opt; 745 u32 opt;
746 746
@@ -783,6 +783,9 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
783 break; 783 break;
784 } 784 }
785 785
786 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
787 conn = l2cap_pi(l2cap_sk)->chan->conn;
788
786 memset(&cinfo, 0, sizeof(cinfo)); 789 memset(&cinfo, 0, sizeof(cinfo));
787 cinfo.hci_handle = conn->hcon->handle; 790 cinfo.hci_handle = conn->hcon->handle;
788 memcpy(cinfo.dev_class, conn->hcon->dev_class, 3); 791 memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 12a0e51e21e1..24fa3964b3c8 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -711,7 +711,6 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
711 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { 711 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
712 sco_conn_defer_accept(pi->conn->hcon, pi->setting); 712 sco_conn_defer_accept(pi->conn->hcon, pi->setting);
713 sk->sk_state = BT_CONFIG; 713 sk->sk_state = BT_CONFIG;
714 msg->msg_namelen = 0;
715 714
716 release_sock(sk); 715 release_sock(sk);
717 return 0; 716 return 0;
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 85a2796cac61..4b07acb8293c 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -742,6 +742,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
742 742
743 BT_DBG("conn %p", conn); 743 BT_DBG("conn %p", conn);
744 744
745 if (!(conn->hcon->link_mode & HCI_LM_MASTER))
746 return SMP_CMD_NOTSUPP;
747
745 hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req); 748 hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);
746 749
747 if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) 750 if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index c41d5fbb91d0..4bf02adb5dc2 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -172,6 +172,9 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
172 del_nbp(p); 172 del_nbp(p);
173 } 173 }
174 174
175 br_fdb_delete_by_port(br, NULL, 1);
176
177 br_vlan_flush(br);
175 del_timer_sync(&br->gc_timer); 178 del_timer_sync(&br->gc_timer);
176 179
177 br_sysfs_delbr(br->dev); 180 br_sysfs_delbr(br->dev);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 229d820bdf0b..045d56eaeca2 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -426,6 +426,16 @@ netdev_features_t br_features_recompute(struct net_bridge *br,
426int br_handle_frame_finish(struct sk_buff *skb); 426int br_handle_frame_finish(struct sk_buff *skb);
427rx_handler_result_t br_handle_frame(struct sk_buff **pskb); 427rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
428 428
429static inline bool br_rx_handler_check_rcu(const struct net_device *dev)
430{
431 return rcu_dereference(dev->rx_handler) == br_handle_frame;
432}
433
434static inline struct net_bridge_port *br_port_get_check_rcu(const struct net_device *dev)
435{
436 return br_rx_handler_check_rcu(dev) ? br_port_get_rcu(dev) : NULL;
437}
438
429/* br_ioctl.c */ 439/* br_ioctl.c */
430int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 440int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
431int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, 441int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd,
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 8660ea3be705..bdb459d21ad8 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -153,7 +153,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
153 if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0) 153 if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
154 goto err; 154 goto err;
155 155
156 p = br_port_get_rcu(dev); 156 p = br_port_get_check_rcu(dev);
157 if (!p) 157 if (!p)
158 goto err; 158 goto err;
159 159
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 53f0990eab58..af5ebd18d705 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -34,7 +34,6 @@ static void __vlan_add_flags(struct net_port_vlans *v, u16 vid, u16 flags)
34 34
35static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags) 35static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
36{ 36{
37 const struct net_device_ops *ops;
38 struct net_bridge_port *p = NULL; 37 struct net_bridge_port *p = NULL;
39 struct net_bridge *br; 38 struct net_bridge *br;
40 struct net_device *dev; 39 struct net_device *dev;
@@ -53,17 +52,15 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
53 br = v->parent.br; 52 br = v->parent.br;
54 dev = br->dev; 53 dev = br->dev;
55 } 54 }
56 ops = dev->netdev_ops;
57 55
58 if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) { 56 if (p) {
59 /* Add VLAN to the device filter if it is supported. 57 /* Add VLAN to the device filter if it is supported.
60 * Stricly speaking, this is not necessary now, since 58 * Stricly speaking, this is not necessary now, since
61 * devices are made promiscuous by the bridge, but if 59 * devices are made promiscuous by the bridge, but if
62 * that ever changes this code will allow tagged 60 * that ever changes this code will allow tagged
63 * traffic to enter the bridge. 61 * traffic to enter the bridge.
64 */ 62 */
65 err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q), 63 err = vlan_vid_add(dev, htons(ETH_P_8021Q), vid);
66 vid);
67 if (err) 64 if (err)
68 return err; 65 return err;
69 } 66 }
@@ -82,8 +79,8 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
82 return 0; 79 return 0;
83 80
84out_filt: 81out_filt:
85 if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 82 if (p)
86 ops->ndo_vlan_rx_kill_vid(dev, htons(ETH_P_8021Q), vid); 83 vlan_vid_del(dev, htons(ETH_P_8021Q), vid);
87 return err; 84 return err;
88} 85}
89 86
@@ -95,13 +92,8 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
95 __vlan_delete_pvid(v, vid); 92 __vlan_delete_pvid(v, vid);
96 clear_bit(vid, v->untagged_bitmap); 93 clear_bit(vid, v->untagged_bitmap);
97 94
98 if (v->port_idx) { 95 if (v->port_idx)
99 struct net_device *dev = v->parent.port->dev; 96 vlan_vid_del(v->parent.port->dev, htons(ETH_P_8021Q), vid);
100 const struct net_device_ops *ops = dev->netdev_ops;
101
102 if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
103 ops->ndo_vlan_rx_kill_vid(dev, htons(ETH_P_8021Q), vid);
104 }
105 97
106 clear_bit(vid, v->vlan_bitmap); 98 clear_bit(vid, v->vlan_bitmap);
107 v->num_vlans--; 99 v->num_vlans--;
@@ -398,6 +390,7 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
398void nbp_vlan_flush(struct net_bridge_port *port) 390void nbp_vlan_flush(struct net_bridge_port *port)
399{ 391{
400 struct net_port_vlans *pv; 392 struct net_port_vlans *pv;
393 u16 vid;
401 394
402 ASSERT_RTNL(); 395 ASSERT_RTNL();
403 396
@@ -405,6 +398,9 @@ void nbp_vlan_flush(struct net_bridge_port *port)
405 if (!pv) 398 if (!pv)
406 return; 399 return;
407 400
401 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
402 vlan_vid_del(port->dev, htons(ETH_P_8021Q), vid);
403
408 __vlan_flush(pv); 404 __vlan_flush(pv);
409} 405}
410 406
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c
index 99c85668f551..17fd5f2cb4b8 100644
--- a/net/bridge/netfilter/ebt_ip6.c
+++ b/net/bridge/netfilter/ebt_ip6.c
@@ -48,10 +48,12 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
48 if (info->bitmask & EBT_IP6_TCLASS && 48 if (info->bitmask & EBT_IP6_TCLASS &&
49 FWINV(info->tclass != ipv6_get_dsfield(ih6), EBT_IP6_TCLASS)) 49 FWINV(info->tclass != ipv6_get_dsfield(ih6), EBT_IP6_TCLASS))
50 return false; 50 return false;
51 if (FWINV(ipv6_masked_addr_cmp(&ih6->saddr, &info->smsk, 51 if ((info->bitmask & EBT_IP6_SOURCE &&
52 &info->saddr), EBT_IP6_SOURCE) || 52 FWINV(ipv6_masked_addr_cmp(&ih6->saddr, &info->smsk,
53 &info->saddr), EBT_IP6_SOURCE)) ||
54 (info->bitmask & EBT_IP6_DEST &&
53 FWINV(ipv6_masked_addr_cmp(&ih6->daddr, &info->dmsk, 55 FWINV(ipv6_masked_addr_cmp(&ih6->daddr, &info->dmsk,
54 &info->daddr), EBT_IP6_DEST)) 56 &info->daddr), EBT_IP6_DEST)))
55 return false; 57 return false;
56 if (info->bitmask & EBT_IP6_PROTO) { 58 if (info->bitmask & EBT_IP6_PROTO) {
57 uint8_t nexthdr = ih6->nexthdr; 59 uint8_t nexthdr = ih6->nexthdr;
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 05a41c7ec304..d6be3edb7a43 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -286,8 +286,6 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
286 if (m->msg_flags&MSG_OOB) 286 if (m->msg_flags&MSG_OOB)
287 goto read_error; 287 goto read_error;
288 288
289 m->msg_namelen = 0;
290
291 skb = skb_recv_datagram(sk, flags, 0 , &ret); 289 skb = skb_recv_datagram(sk, flags, 0 , &ret);
292 if (!skb) 290 if (!skb)
293 goto read_error; 291 goto read_error;
@@ -361,8 +359,6 @@ static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
361 if (flags&MSG_OOB) 359 if (flags&MSG_OOB)
362 goto out; 360 goto out;
363 361
364 msg->msg_namelen = 0;
365
366 /* 362 /*
367 * Lock the socket to prevent queue disordering 363 * Lock the socket to prevent queue disordering
368 * while sleeps in memcpy_tomsg 364 * while sleeps in memcpy_tomsg
diff --git a/net/compat.c b/net/compat.c
index 89032580bd1d..dd32e34c1e2c 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -72,7 +72,7 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
72 __get_user(kmsg->msg_flags, &umsg->msg_flags)) 72 __get_user(kmsg->msg_flags, &umsg->msg_flags))
73 return -EFAULT; 73 return -EFAULT;
74 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) 74 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
75 return -EINVAL; 75 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
76 kmsg->msg_name = compat_ptr(tmp1); 76 kmsg->msg_name = compat_ptr(tmp1);
77 kmsg->msg_iov = compat_ptr(tmp2); 77 kmsg->msg_iov = compat_ptr(tmp2);
78 kmsg->msg_control = compat_ptr(tmp3); 78 kmsg->msg_control = compat_ptr(tmp3);
@@ -93,7 +93,8 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
93 if (err < 0) 93 if (err < 0)
94 return err; 94 return err;
95 } 95 }
96 kern_msg->msg_name = kern_address; 96 if (kern_msg->msg_name)
97 kern_msg->msg_name = kern_address;
97 } else 98 } else
98 kern_msg->msg_name = NULL; 99 kern_msg->msg_name = NULL;
99 100
diff --git a/net/core/dev.c b/net/core/dev.c
index 8ffc52e01ece..ba3b7ea5ebb3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -131,6 +131,7 @@
131#include <linux/static_key.h> 131#include <linux/static_key.h>
132#include <linux/hashtable.h> 132#include <linux/hashtable.h>
133#include <linux/vmalloc.h> 133#include <linux/vmalloc.h>
134#include <linux/if_macvlan.h>
134 135
135#include "net-sysfs.h" 136#include "net-sysfs.h"
136 137
@@ -1424,6 +1425,10 @@ void dev_disable_lro(struct net_device *dev)
1424 if (is_vlan_dev(dev)) 1425 if (is_vlan_dev(dev))
1425 dev = vlan_dev_real_dev(dev); 1426 dev = vlan_dev_real_dev(dev);
1426 1427
1428 /* the same for macvlan devices */
1429 if (netif_is_macvlan(dev))
1430 dev = macvlan_dev_real_dev(dev);
1431
1427 dev->wanted_features &= ~NETIF_F_LRO; 1432 dev->wanted_features &= ~NETIF_F_LRO;
1428 netdev_update_features(dev); 1433 netdev_update_features(dev);
1429 1434
@@ -1690,13 +1695,9 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1690 kfree_skb(skb); 1695 kfree_skb(skb);
1691 return NET_RX_DROP; 1696 return NET_RX_DROP;
1692 } 1697 }
1693 skb->protocol = eth_type_trans(skb, dev);
1694 1698
1695 /* eth_type_trans() can set pkt_type.
1696 * call skb_scrub_packet() after it to clear pkt_type _after_ calling
1697 * eth_type_trans().
1698 */
1699 skb_scrub_packet(skb, true); 1699 skb_scrub_packet(skb, true);
1700 skb->protocol = eth_type_trans(skb, dev);
1700 1701
1701 return netif_rx(skb); 1702 return netif_rx(skb);
1702} 1703}
@@ -4995,7 +4996,7 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
4995{ 4996{
4996 const struct net_device_ops *ops = dev->netdev_ops; 4997 const struct net_device_ops *ops = dev->netdev_ops;
4997 4998
4998 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags) 4999 if (ops->ndo_change_rx_flags)
4999 ops->ndo_change_rx_flags(dev, flags); 5000 ops->ndo_change_rx_flags(dev, flags);
5000} 5001}
5001 5002
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 5e78d44333b9..e70301eb7a4a 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -64,7 +64,6 @@ static struct genl_family net_drop_monitor_family = {
64 .hdrsize = 0, 64 .hdrsize = 0,
65 .name = "NET_DM", 65 .name = "NET_DM",
66 .version = 2, 66 .version = 2,
67 .maxattr = NET_DM_CMD_MAX,
68}; 67};
69 68
70static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data); 69static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);
@@ -106,6 +105,10 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
106 return skb; 105 return skb;
107} 106}
108 107
108static struct genl_multicast_group dropmon_mcgrps[] = {
109 { .name = "events", },
110};
111
109static void send_dm_alert(struct work_struct *work) 112static void send_dm_alert(struct work_struct *work)
110{ 113{
111 struct sk_buff *skb; 114 struct sk_buff *skb;
@@ -116,7 +119,8 @@ static void send_dm_alert(struct work_struct *work)
116 skb = reset_per_cpu_data(data); 119 skb = reset_per_cpu_data(data);
117 120
118 if (skb) 121 if (skb)
119 genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL); 122 genlmsg_multicast(&net_drop_monitor_family, skb, 0,
123 0, GFP_KERNEL);
120} 124}
121 125
122/* 126/*
@@ -333,7 +337,7 @@ out:
333 return NOTIFY_DONE; 337 return NOTIFY_DONE;
334} 338}
335 339
336static struct genl_ops dropmon_ops[] = { 340static const struct genl_ops dropmon_ops[] = {
337 { 341 {
338 .cmd = NET_DM_CMD_CONFIG, 342 .cmd = NET_DM_CMD_CONFIG,
339 .doit = net_dm_cmd_config, 343 .doit = net_dm_cmd_config,
@@ -364,13 +368,13 @@ static int __init init_net_drop_monitor(void)
364 return -ENOSPC; 368 return -ENOSPC;
365 } 369 }
366 370
367 rc = genl_register_family_with_ops(&net_drop_monitor_family, 371 rc = genl_register_family_with_ops_groups(&net_drop_monitor_family,
368 dropmon_ops, 372 dropmon_ops, dropmon_mcgrps);
369 ARRAY_SIZE(dropmon_ops));
370 if (rc) { 373 if (rc) {
371 pr_err("Could not create drop monitor netlink family\n"); 374 pr_err("Could not create drop monitor netlink family\n");
372 return rc; 375 return rc;
373 } 376 }
377 WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT);
374 378
375 rc = register_netdevice_notifier(&dropmon_net_notifier); 379 rc = register_netdevice_notifier(&dropmon_net_notifier);
376 if (rc < 0) { 380 if (rc < 0) {
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 4cdb7c48dad6..b61869429f4c 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -48,7 +48,8 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
48 if (err < 0) 48 if (err < 0)
49 return err; 49 return err;
50 } 50 }
51 m->msg_name = address; 51 if (m->msg_name)
52 m->msg_name = address;
52 } else { 53 } else {
53 m->msg_name = NULL; 54 m->msg_name = NULL;
54 } 55 }
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 261357a66300..a797fff7f222 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2527,6 +2527,8 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
2527 if (x) { 2527 if (x) {
2528 int ret; 2528 int ret;
2529 __u8 *eth; 2529 __u8 *eth;
2530 struct iphdr *iph;
2531
2530 nhead = x->props.header_len - skb_headroom(skb); 2532 nhead = x->props.header_len - skb_headroom(skb);
2531 if (nhead > 0) { 2533 if (nhead > 0) {
2532 ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC); 2534 ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
@@ -2548,6 +2550,11 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
2548 eth = (__u8 *) skb_push(skb, ETH_HLEN); 2550 eth = (__u8 *) skb_push(skb, ETH_HLEN);
2549 memcpy(eth, pkt_dev->hh, 12); 2551 memcpy(eth, pkt_dev->hh, 12);
2550 *(u16 *) &eth[12] = protocol; 2552 *(u16 *) &eth[12] = protocol;
2553
2554 /* Update IPv4 header len as well as checksum value */
2555 iph = ip_hdr(skb);
2556 iph->tot_len = htons(skb->len - ETH_HLEN);
2557 ip_send_check(iph);
2551 } 2558 }
2552 } 2559 }
2553 return 1; 2560 return 1;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8cec1e6b844d..06e72d3cdf60 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2796,6 +2796,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2796 struct sk_buff *segs = NULL; 2796 struct sk_buff *segs = NULL;
2797 struct sk_buff *tail = NULL; 2797 struct sk_buff *tail = NULL;
2798 struct sk_buff *fskb = skb_shinfo(skb)->frag_list; 2798 struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
2799 skb_frag_t *skb_frag = skb_shinfo(skb)->frags;
2799 unsigned int mss = skb_shinfo(skb)->gso_size; 2800 unsigned int mss = skb_shinfo(skb)->gso_size;
2800 unsigned int doffset = skb->data - skb_mac_header(skb); 2801 unsigned int doffset = skb->data - skb_mac_header(skb);
2801 unsigned int offset = doffset; 2802 unsigned int offset = doffset;
@@ -2835,16 +2836,38 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2835 if (hsize > len || !sg) 2836 if (hsize > len || !sg)
2836 hsize = len; 2837 hsize = len;
2837 2838
2838 if (!hsize && i >= nfrags) { 2839 if (!hsize && i >= nfrags && skb_headlen(fskb) &&
2839 BUG_ON(fskb->len != len); 2840 (skb_headlen(fskb) == len || sg)) {
2841 BUG_ON(skb_headlen(fskb) > len);
2842
2843 i = 0;
2844 nfrags = skb_shinfo(fskb)->nr_frags;
2845 skb_frag = skb_shinfo(fskb)->frags;
2846 pos += skb_headlen(fskb);
2847
2848 while (pos < offset + len) {
2849 BUG_ON(i >= nfrags);
2850
2851 size = skb_frag_size(skb_frag);
2852 if (pos + size > offset + len)
2853 break;
2854
2855 i++;
2856 pos += size;
2857 skb_frag++;
2858 }
2840 2859
2841 pos += len;
2842 nskb = skb_clone(fskb, GFP_ATOMIC); 2860 nskb = skb_clone(fskb, GFP_ATOMIC);
2843 fskb = fskb->next; 2861 fskb = fskb->next;
2844 2862
2845 if (unlikely(!nskb)) 2863 if (unlikely(!nskb))
2846 goto err; 2864 goto err;
2847 2865
2866 if (unlikely(pskb_trim(nskb, len))) {
2867 kfree_skb(nskb);
2868 goto err;
2869 }
2870
2848 hsize = skb_end_offset(nskb); 2871 hsize = skb_end_offset(nskb);
2849 if (skb_cow_head(nskb, doffset + headroom)) { 2872 if (skb_cow_head(nskb, doffset + headroom)) {
2850 kfree_skb(nskb); 2873 kfree_skb(nskb);
@@ -2881,7 +2904,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2881 nskb->data - tnl_hlen, 2904 nskb->data - tnl_hlen,
2882 doffset + tnl_hlen); 2905 doffset + tnl_hlen);
2883 2906
2884 if (fskb != skb_shinfo(skb)->frag_list) 2907 if (nskb->len == len + doffset)
2885 goto perform_csum_check; 2908 goto perform_csum_check;
2886 2909
2887 if (!sg) { 2910 if (!sg) {
@@ -2899,8 +2922,28 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2899 2922
2900 skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; 2923 skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
2901 2924
2902 while (pos < offset + len && i < nfrags) { 2925 while (pos < offset + len) {
2903 *frag = skb_shinfo(skb)->frags[i]; 2926 if (i >= nfrags) {
2927 BUG_ON(skb_headlen(fskb));
2928
2929 i = 0;
2930 nfrags = skb_shinfo(fskb)->nr_frags;
2931 skb_frag = skb_shinfo(fskb)->frags;
2932
2933 BUG_ON(!nfrags);
2934
2935 fskb = fskb->next;
2936 }
2937
2938 if (unlikely(skb_shinfo(nskb)->nr_frags >=
2939 MAX_SKB_FRAGS)) {
2940 net_warn_ratelimited(
2941 "skb_segment: too many frags: %u %u\n",
2942 pos, mss);
2943 goto err;
2944 }
2945
2946 *frag = *skb_frag;
2904 __skb_frag_ref(frag); 2947 __skb_frag_ref(frag);
2905 size = skb_frag_size(frag); 2948 size = skb_frag_size(frag);
2906 2949
@@ -2913,6 +2956,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2913 2956
2914 if (pos + size <= offset + len) { 2957 if (pos + size <= offset + len) {
2915 i++; 2958 i++;
2959 skb_frag++;
2916 pos += size; 2960 pos += size;
2917 } else { 2961 } else {
2918 skb_frag_size_sub(frag, pos + size - (offset + len)); 2962 skb_frag_size_sub(frag, pos + size - (offset + len));
@@ -2922,25 +2966,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2922 frag++; 2966 frag++;
2923 } 2967 }
2924 2968
2925 if (pos < offset + len) {
2926 struct sk_buff *fskb2 = fskb;
2927
2928 BUG_ON(pos + fskb->len != offset + len);
2929
2930 pos += fskb->len;
2931 fskb = fskb->next;
2932
2933 if (fskb2->next) {
2934 fskb2 = skb_clone(fskb2, GFP_ATOMIC);
2935 if (!fskb2)
2936 goto err;
2937 } else
2938 skb_get(fskb2);
2939
2940 SKB_FRAG_ASSERT(nskb);
2941 skb_shinfo(nskb)->frag_list = fskb2;
2942 }
2943
2944skip_fraglist: 2969skip_fraglist:
2945 nskb->data_len = len - hsize; 2970 nskb->data_len = len - hsize;
2946 nskb->len += nskb->data_len; 2971 nskb->len += nskb->data_len;
@@ -3559,6 +3584,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
3559 skb->tstamp.tv64 = 0; 3584 skb->tstamp.tv64 = 0;
3560 skb->pkt_type = PACKET_HOST; 3585 skb->pkt_type = PACKET_HOST;
3561 skb->skb_iif = 0; 3586 skb->skb_iif = 0;
3587 skb->local_df = 0;
3562 skb_dst_drop(skb); 3588 skb_dst_drop(skb);
3563 skb->mark = 0; 3589 skb->mark = 0;
3564 secpath_reset(skb); 3590 secpath_reset(skb);
diff --git a/net/core/sock.c b/net/core/sock.c
index ab20ed9b0f31..5393b4b719d7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -882,7 +882,7 @@ set_rcvbuf:
882 882
883 case SO_PEEK_OFF: 883 case SO_PEEK_OFF:
884 if (sock->ops->set_peek_off) 884 if (sock->ops->set_peek_off)
885 sock->ops->set_peek_off(sk, val); 885 ret = sock->ops->set_peek_off(sk, val);
886 else 886 else
887 ret = -EOPNOTSUPP; 887 ret = -EOPNOTSUPP;
888 break; 888 break;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 4ac71ff7c2e4..2b90a786e475 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -851,7 +851,6 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
851 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); 851 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
852 if (flowlabel == NULL) 852 if (flowlabel == NULL)
853 return -EINVAL; 853 return -EINVAL;
854 usin->sin6_addr = flowlabel->dst;
855 fl6_sock_release(flowlabel); 854 fl6_sock_release(flowlabel);
856 } 855 }
857 } 856 }
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 003f5bb3acd2..4bdab1521878 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -288,7 +288,8 @@ void hsr_addr_subst_dest(struct hsr_priv *hsr_priv, struct ethhdr *ethhdr,
288static bool seq_nr_after(u16 a, u16 b) 288static bool seq_nr_after(u16 a, u16 b)
289{ 289{
290 /* Remove inconsistency where 290 /* Remove inconsistency where
291 * seq_nr_after(a, b) == seq_nr_before(a, b) */ 291 * seq_nr_after(a, b) == seq_nr_before(a, b)
292 */
292 if ((int) b - a == 32768) 293 if ((int) b - a == 32768)
293 return false; 294 return false;
294 295
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
index 4e66bf61f585..01a5261ac7a5 100644
--- a/net/hsr/hsr_netlink.c
+++ b/net/hsr/hsr_netlink.c
@@ -23,6 +23,8 @@ static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
23 [IFLA_HSR_SLAVE1] = { .type = NLA_U32 }, 23 [IFLA_HSR_SLAVE1] = { .type = NLA_U32 },
24 [IFLA_HSR_SLAVE2] = { .type = NLA_U32 }, 24 [IFLA_HSR_SLAVE2] = { .type = NLA_U32 },
25 [IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 }, 25 [IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 },
26 [IFLA_HSR_SUPERVISION_ADDR] = { .type = NLA_BINARY, .len = ETH_ALEN },
27 [IFLA_HSR_SEQ_NR] = { .type = NLA_U16 },
26}; 28};
27 29
28 30
@@ -59,6 +61,31 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
59 return hsr_dev_finalize(dev, link, multicast_spec); 61 return hsr_dev_finalize(dev, link, multicast_spec);
60} 62}
61 63
64static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
65{
66 struct hsr_priv *hsr_priv;
67
68 hsr_priv = netdev_priv(dev);
69
70 if (hsr_priv->slave[0])
71 if (nla_put_u32(skb, IFLA_HSR_SLAVE1, hsr_priv->slave[0]->ifindex))
72 goto nla_put_failure;
73
74 if (hsr_priv->slave[1])
75 if (nla_put_u32(skb, IFLA_HSR_SLAVE2, hsr_priv->slave[1]->ifindex))
76 goto nla_put_failure;
77
78 if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
79 hsr_priv->sup_multicast_addr) ||
80 nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr_priv->sequence_nr))
81 goto nla_put_failure;
82
83 return 0;
84
85nla_put_failure:
86 return -EMSGSIZE;
87}
88
62static struct rtnl_link_ops hsr_link_ops __read_mostly = { 89static struct rtnl_link_ops hsr_link_ops __read_mostly = {
63 .kind = "hsr", 90 .kind = "hsr",
64 .maxtype = IFLA_HSR_MAX, 91 .maxtype = IFLA_HSR_MAX,
@@ -66,6 +93,7 @@ static struct rtnl_link_ops hsr_link_ops __read_mostly = {
66 .priv_size = sizeof(struct hsr_priv), 93 .priv_size = sizeof(struct hsr_priv),
67 .setup = hsr_dev_setup, 94 .setup = hsr_dev_setup,
68 .newlink = hsr_newlink, 95 .newlink = hsr_newlink,
96 .fill_info = hsr_fill_info,
69}; 97};
70 98
71 99
@@ -90,8 +118,8 @@ static struct genl_family hsr_genl_family = {
90 .maxattr = HSR_A_MAX, 118 .maxattr = HSR_A_MAX,
91}; 119};
92 120
93static struct genl_multicast_group hsr_network_genl_mcgrp = { 121static const struct genl_multicast_group hsr_mcgrps[] = {
94 .name = "hsr-network", 122 { .name = "hsr-network", },
95}; 123};
96 124
97 125
@@ -129,7 +157,7 @@ void hsr_nl_ringerror(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN],
129 goto nla_put_failure; 157 goto nla_put_failure;
130 158
131 genlmsg_end(skb, msg_head); 159 genlmsg_end(skb, msg_head);
132 genlmsg_multicast(skb, 0, hsr_network_genl_mcgrp.id, GFP_ATOMIC); 160 genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);
133 161
134 return; 162 return;
135 163
@@ -163,7 +191,7 @@ void hsr_nl_nodedown(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN])
163 goto nla_put_failure; 191 goto nla_put_failure;
164 192
165 genlmsg_end(skb, msg_head); 193 genlmsg_end(skb, msg_head);
166 genlmsg_multicast(skb, 0, hsr_network_genl_mcgrp.id, GFP_ATOMIC); 194 genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);
167 195
168 return; 196 return;
169 197
@@ -249,7 +277,7 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
249 &hsr_node_if2_age, 277 &hsr_node_if2_age,
250 &hsr_node_if2_seq); 278 &hsr_node_if2_seq);
251 if (res < 0) 279 if (res < 0)
252 goto fail; 280 goto nla_put_failure;
253 281
254 res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, 282 res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
255 nla_data(info->attrs[HSR_A_NODE_ADDR])); 283 nla_data(info->attrs[HSR_A_NODE_ADDR]));
@@ -306,15 +334,6 @@ fail:
306 return res; 334 return res;
307} 335}
308 336
309static struct genl_ops hsr_ops_get_node_status = {
310 .cmd = HSR_C_GET_NODE_STATUS,
311 .flags = 0,
312 .policy = hsr_genl_policy,
313 .doit = hsr_get_node_status,
314 .dumpit = NULL,
315};
316
317
318/* Get a list of MacAddressA of all nodes known to this node (other than self). 337/* Get a list of MacAddressA of all nodes known to this node (other than self).
319 */ 338 */
320static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info) 339static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
@@ -398,12 +417,21 @@ fail:
398} 417}
399 418
400 419
401static struct genl_ops hsr_ops_get_node_list = { 420static const struct genl_ops hsr_ops[] = {
402 .cmd = HSR_C_GET_NODE_LIST, 421 {
403 .flags = 0, 422 .cmd = HSR_C_GET_NODE_STATUS,
404 .policy = hsr_genl_policy, 423 .flags = 0,
405 .doit = hsr_get_node_list, 424 .policy = hsr_genl_policy,
406 .dumpit = NULL, 425 .doit = hsr_get_node_status,
426 .dumpit = NULL,
427 },
428 {
429 .cmd = HSR_C_GET_NODE_LIST,
430 .flags = 0,
431 .policy = hsr_genl_policy,
432 .doit = hsr_get_node_list,
433 .dumpit = NULL,
434 },
407}; 435};
408 436
409int __init hsr_netlink_init(void) 437int __init hsr_netlink_init(void)
@@ -414,30 +442,13 @@ int __init hsr_netlink_init(void)
414 if (rc) 442 if (rc)
415 goto fail_rtnl_link_register; 443 goto fail_rtnl_link_register;
416 444
417 rc = genl_register_family(&hsr_genl_family); 445 rc = genl_register_family_with_ops_groups(&hsr_genl_family, hsr_ops,
446 hsr_mcgrps);
418 if (rc) 447 if (rc)
419 goto fail_genl_register_family; 448 goto fail_genl_register_family;
420 449
421 rc = genl_register_ops(&hsr_genl_family, &hsr_ops_get_node_status);
422 if (rc)
423 goto fail_genl_register_ops;
424
425 rc = genl_register_ops(&hsr_genl_family, &hsr_ops_get_node_list);
426 if (rc)
427 goto fail_genl_register_ops_node_list;
428
429 rc = genl_register_mc_group(&hsr_genl_family, &hsr_network_genl_mcgrp);
430 if (rc)
431 goto fail_genl_register_mc_group;
432
433 return 0; 450 return 0;
434 451
435fail_genl_register_mc_group:
436 genl_unregister_ops(&hsr_genl_family, &hsr_ops_get_node_list);
437fail_genl_register_ops_node_list:
438 genl_unregister_ops(&hsr_genl_family, &hsr_ops_get_node_status);
439fail_genl_register_ops:
440 genl_unregister_family(&hsr_genl_family);
441fail_genl_register_family: 452fail_genl_register_family:
442 rtnl_link_unregister(&hsr_link_ops); 453 rtnl_link_unregister(&hsr_link_ops);
443fail_rtnl_link_register: 454fail_rtnl_link_register:
@@ -447,10 +458,7 @@ fail_rtnl_link_register:
447 458
448void __exit hsr_netlink_exit(void) 459void __exit hsr_netlink_exit(void)
449{ 460{
450 genl_unregister_mc_group(&hsr_genl_family, &hsr_network_genl_mcgrp);
451 genl_unregister_ops(&hsr_genl_family, &hsr_ops_get_node_status);
452 genl_unregister_family(&hsr_genl_family); 461 genl_unregister_family(&hsr_genl_family);
453
454 rtnl_link_unregister(&hsr_link_ops); 462 rtnl_link_unregister(&hsr_link_ops);
455} 463}
456 464
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 426b5df1c98f..459e200c08a4 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -956,7 +956,7 @@ lowpan_process_data(struct sk_buff *skb)
956 * Traffic class carried in-line 956 * Traffic class carried in-line
957 * ECN + DSCP (1 byte), Flow Label is elided 957 * ECN + DSCP (1 byte), Flow Label is elided
958 */ 958 */
959 case 1: /* 10b */ 959 case 2: /* 10b */
960 if (lowpan_fetch_skb_u8(skb, &tmp)) 960 if (lowpan_fetch_skb_u8(skb, &tmp))
961 goto drop; 961 goto drop;
962 962
@@ -967,7 +967,7 @@ lowpan_process_data(struct sk_buff *skb)
967 * Flow Label carried in-line 967 * Flow Label carried in-line
968 * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided 968 * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
969 */ 969 */
970 case 2: /* 01b */ 970 case 1: /* 01b */
971 if (lowpan_fetch_skb_u8(skb, &tmp)) 971 if (lowpan_fetch_skb_u8(skb, &tmp))
972 goto drop; 972 goto drop;
973 973
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 581a59504bd5..1865fdf5a5a5 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -315,9 +315,8 @@ static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
315 if (saddr) { 315 if (saddr) {
316 saddr->family = AF_IEEE802154; 316 saddr->family = AF_IEEE802154;
317 saddr->addr = mac_cb(skb)->sa; 317 saddr->addr = mac_cb(skb)->sa;
318 }
319 if (addr_len)
320 *addr_len = sizeof(*saddr); 318 *addr_len = sizeof(*saddr);
319 }
321 320
322 if (flags & MSG_TRUNC) 321 if (flags & MSG_TRUNC)
323 copied = skb->len; 322 copied = skb->len;
diff --git a/net/ieee802154/ieee802154.h b/net/ieee802154/ieee802154.h
index aadec428e6ec..cee4425b9956 100644
--- a/net/ieee802154/ieee802154.h
+++ b/net/ieee802154/ieee802154.h
@@ -47,7 +47,24 @@ struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info,
47int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info); 47int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info);
48 48
49extern struct genl_family nl802154_family; 49extern struct genl_family nl802154_family;
50int nl802154_mac_register(void); 50
51int nl802154_phy_register(void); 51/* genetlink ops/groups */
52int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info);
53int ieee802154_dump_phy(struct sk_buff *skb, struct netlink_callback *cb);
54int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info);
55int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info);
56
57enum ieee802154_mcgrp_ids {
58 IEEE802154_COORD_MCGRP,
59 IEEE802154_BEACON_MCGRP,
60};
61
62int ieee802154_associate_req(struct sk_buff *skb, struct genl_info *info);
63int ieee802154_associate_resp(struct sk_buff *skb, struct genl_info *info);
64int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info);
65int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info);
66int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info);
67int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info);
68int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb);
52 69
53#endif 70#endif
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index 7e49bbcc6967..43f1b2bf469f 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -70,7 +70,7 @@ int ieee802154_nl_mcast(struct sk_buff *msg, unsigned int group)
70 if (genlmsg_end(msg, hdr) < 0) 70 if (genlmsg_end(msg, hdr) < 0)
71 goto out; 71 goto out;
72 72
73 return genlmsg_multicast(msg, 0, group, GFP_ATOMIC); 73 return genlmsg_multicast(&nl802154_family, msg, 0, group, GFP_ATOMIC);
74out: 74out:
75 nlmsg_free(msg); 75 nlmsg_free(msg);
76 return -ENOBUFS; 76 return -ENOBUFS;
@@ -109,31 +109,36 @@ out:
109 return -ENOBUFS; 109 return -ENOBUFS;
110} 110}
111 111
112int __init ieee802154_nl_init(void) 112static const struct genl_ops ieee8021154_ops[] = {
113{ 113 /* see nl-phy.c */
114 int rc; 114 IEEE802154_DUMP(IEEE802154_LIST_PHY, ieee802154_list_phy,
115 115 ieee802154_dump_phy),
116 rc = genl_register_family(&nl802154_family); 116 IEEE802154_OP(IEEE802154_ADD_IFACE, ieee802154_add_iface),
117 if (rc) 117 IEEE802154_OP(IEEE802154_DEL_IFACE, ieee802154_del_iface),
118 goto fail; 118 /* see nl-mac.c */
119 119 IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req),
120 rc = nl802154_mac_register(); 120 IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp),
121 if (rc) 121 IEEE802154_OP(IEEE802154_DISASSOCIATE_REQ, ieee802154_disassociate_req),
122 goto fail; 122 IEEE802154_OP(IEEE802154_SCAN_REQ, ieee802154_scan_req),
123 IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req),
124 IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface,
125 ieee802154_dump_iface),
126};
123 127
124 rc = nl802154_phy_register(); 128static const struct genl_multicast_group ieee802154_mcgrps[] = {
125 if (rc) 129 [IEEE802154_COORD_MCGRP] = { .name = IEEE802154_MCAST_COORD_NAME, },
126 goto fail; 130 [IEEE802154_BEACON_MCGRP] = { .name = IEEE802154_MCAST_BEACON_NAME, },
131};
127 132
128 return 0;
129 133
130fail: 134int __init ieee802154_nl_init(void)
131 genl_unregister_family(&nl802154_family); 135{
132 return rc; 136 return genl_register_family_with_ops_groups(&nl802154_family,
137 ieee8021154_ops,
138 ieee802154_mcgrps);
133} 139}
134 140
135void __exit ieee802154_nl_exit(void) 141void __exit ieee802154_nl_exit(void)
136{ 142{
137 genl_unregister_family(&nl802154_family); 143 genl_unregister_family(&nl802154_family);
138} 144}
139
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index b0bdd8c51e9c..ba5c1e002f37 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -39,14 +39,6 @@
39 39
40#include "ieee802154.h" 40#include "ieee802154.h"
41 41
42static struct genl_multicast_group ieee802154_coord_mcgrp = {
43 .name = IEEE802154_MCAST_COORD_NAME,
44};
45
46static struct genl_multicast_group ieee802154_beacon_mcgrp = {
47 .name = IEEE802154_MCAST_BEACON_NAME,
48};
49
50int ieee802154_nl_assoc_indic(struct net_device *dev, 42int ieee802154_nl_assoc_indic(struct net_device *dev,
51 struct ieee802154_addr *addr, u8 cap) 43 struct ieee802154_addr *addr, u8 cap)
52{ 44{
@@ -72,7 +64,7 @@ int ieee802154_nl_assoc_indic(struct net_device *dev,
72 nla_put_u8(msg, IEEE802154_ATTR_CAPABILITY, cap)) 64 nla_put_u8(msg, IEEE802154_ATTR_CAPABILITY, cap))
73 goto nla_put_failure; 65 goto nla_put_failure;
74 66
75 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 67 return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP);
76 68
77nla_put_failure: 69nla_put_failure:
78 nlmsg_free(msg); 70 nlmsg_free(msg);
@@ -98,7 +90,7 @@ int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr,
98 nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) || 90 nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) ||
99 nla_put_u8(msg, IEEE802154_ATTR_STATUS, status)) 91 nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
100 goto nla_put_failure; 92 goto nla_put_failure;
101 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 93 return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP);
102 94
103nla_put_failure: 95nla_put_failure:
104 nlmsg_free(msg); 96 nlmsg_free(msg);
@@ -133,7 +125,7 @@ int ieee802154_nl_disassoc_indic(struct net_device *dev,
133 } 125 }
134 if (nla_put_u8(msg, IEEE802154_ATTR_REASON, reason)) 126 if (nla_put_u8(msg, IEEE802154_ATTR_REASON, reason))
135 goto nla_put_failure; 127 goto nla_put_failure;
136 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 128 return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP);
137 129
138nla_put_failure: 130nla_put_failure:
139 nlmsg_free(msg); 131 nlmsg_free(msg);
@@ -157,7 +149,7 @@ int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status)
157 dev->dev_addr) || 149 dev->dev_addr) ||
158 nla_put_u8(msg, IEEE802154_ATTR_STATUS, status)) 150 nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
159 goto nla_put_failure; 151 goto nla_put_failure;
160 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 152 return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP);
161 153
162nla_put_failure: 154nla_put_failure:
163 nlmsg_free(msg); 155 nlmsg_free(msg);
@@ -183,7 +175,7 @@ int ieee802154_nl_beacon_indic(struct net_device *dev,
183 nla_put_u16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr) || 175 nla_put_u16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr) ||
184 nla_put_u16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid)) 176 nla_put_u16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid))
185 goto nla_put_failure; 177 goto nla_put_failure;
186 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 178 return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP);
187 179
188nla_put_failure: 180nla_put_failure:
189 nlmsg_free(msg); 181 nlmsg_free(msg);
@@ -214,7 +206,7 @@ int ieee802154_nl_scan_confirm(struct net_device *dev,
214 (edl && 206 (edl &&
215 nla_put(msg, IEEE802154_ATTR_ED_LIST, 27, edl))) 207 nla_put(msg, IEEE802154_ATTR_ED_LIST, 27, edl)))
216 goto nla_put_failure; 208 goto nla_put_failure;
217 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 209 return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP);
218 210
219nla_put_failure: 211nla_put_failure:
220 nlmsg_free(msg); 212 nlmsg_free(msg);
@@ -238,7 +230,7 @@ int ieee802154_nl_start_confirm(struct net_device *dev, u8 status)
238 dev->dev_addr) || 230 dev->dev_addr) ||
239 nla_put_u8(msg, IEEE802154_ATTR_STATUS, status)) 231 nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
240 goto nla_put_failure; 232 goto nla_put_failure;
241 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 233 return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP);
242 234
243nla_put_failure: 235nla_put_failure:
244 nlmsg_free(msg); 236 nlmsg_free(msg);
@@ -309,8 +301,7 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
309 return dev; 301 return dev;
310} 302}
311 303
312static int ieee802154_associate_req(struct sk_buff *skb, 304int ieee802154_associate_req(struct sk_buff *skb, struct genl_info *info)
313 struct genl_info *info)
314{ 305{
315 struct net_device *dev; 306 struct net_device *dev;
316 struct ieee802154_addr addr; 307 struct ieee802154_addr addr;
@@ -357,8 +348,7 @@ out:
357 return ret; 348 return ret;
358} 349}
359 350
360static int ieee802154_associate_resp(struct sk_buff *skb, 351int ieee802154_associate_resp(struct sk_buff *skb, struct genl_info *info)
361 struct genl_info *info)
362{ 352{
363 struct net_device *dev; 353 struct net_device *dev;
364 struct ieee802154_addr addr; 354 struct ieee802154_addr addr;
@@ -390,8 +380,7 @@ out:
390 return ret; 380 return ret;
391} 381}
392 382
393static int ieee802154_disassociate_req(struct sk_buff *skb, 383int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info)
394 struct genl_info *info)
395{ 384{
396 struct net_device *dev; 385 struct net_device *dev;
397 struct ieee802154_addr addr; 386 struct ieee802154_addr addr;
@@ -433,7 +422,7 @@ out:
433 * PAN_coordinator, battery_life_extension = 0, 422 * PAN_coordinator, battery_life_extension = 0,
434 * coord_realignment = 0, security_enable = 0 423 * coord_realignment = 0, security_enable = 0
435*/ 424*/
436static int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info) 425int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info)
437{ 426{
438 struct net_device *dev; 427 struct net_device *dev;
439 struct ieee802154_addr addr; 428 struct ieee802154_addr addr;
@@ -492,7 +481,7 @@ out:
492 return ret; 481 return ret;
493} 482}
494 483
495static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info) 484int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info)
496{ 485{
497 struct net_device *dev; 486 struct net_device *dev;
498 int ret = -EOPNOTSUPP; 487 int ret = -EOPNOTSUPP;
@@ -530,8 +519,7 @@ out:
530 return ret; 519 return ret;
531} 520}
532 521
533static int ieee802154_list_iface(struct sk_buff *skb, 522int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info)
534 struct genl_info *info)
535{ 523{
536 /* Request for interface name, index, type, IEEE address, 524 /* Request for interface name, index, type, IEEE address,
537 PAN Id, short address */ 525 PAN Id, short address */
@@ -565,8 +553,7 @@ out_dev:
565 553
566} 554}
567 555
568static int ieee802154_dump_iface(struct sk_buff *skb, 556int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb)
569 struct netlink_callback *cb)
570{ 557{
571 struct net *net = sock_net(skb->sk); 558 struct net *net = sock_net(skb->sk);
572 struct net_device *dev; 559 struct net_device *dev;
@@ -590,41 +577,3 @@ cont:
590 577
591 return skb->len; 578 return skb->len;
592} 579}
593
594static struct genl_ops ieee802154_coordinator_ops[] = {
595 IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req),
596 IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp),
597 IEEE802154_OP(IEEE802154_DISASSOCIATE_REQ, ieee802154_disassociate_req),
598 IEEE802154_OP(IEEE802154_SCAN_REQ, ieee802154_scan_req),
599 IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req),
600 IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface,
601 ieee802154_dump_iface),
602};
603
604/*
605 * No need to unregister as family unregistration will do it.
606 */
607int nl802154_mac_register(void)
608{
609 int i;
610 int rc;
611
612 rc = genl_register_mc_group(&nl802154_family,
613 &ieee802154_coord_mcgrp);
614 if (rc)
615 return rc;
616
617 rc = genl_register_mc_group(&nl802154_family,
618 &ieee802154_beacon_mcgrp);
619 if (rc)
620 return rc;
621
622 for (i = 0; i < ARRAY_SIZE(ieee802154_coordinator_ops); i++) {
623 rc = genl_register_ops(&nl802154_family,
624 &ieee802154_coordinator_ops[i]);
625 if (rc)
626 return rc;
627 }
628
629 return 0;
630}
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index 22b1a7058fd3..d08c7a43dcd1 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -77,8 +77,7 @@ out:
77 return -EMSGSIZE; 77 return -EMSGSIZE;
78} 78}
79 79
80static int ieee802154_list_phy(struct sk_buff *skb, 80int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info)
81 struct genl_info *info)
82{ 81{
83 /* Request for interface name, index, type, IEEE address, 82 /* Request for interface name, index, type, IEEE address,
84 PAN Id, short address */ 83 PAN Id, short address */
@@ -151,8 +150,7 @@ static int ieee802154_dump_phy_iter(struct wpan_phy *phy, void *_data)
151 return 0; 150 return 0;
152} 151}
153 152
154static int ieee802154_dump_phy(struct sk_buff *skb, 153int ieee802154_dump_phy(struct sk_buff *skb, struct netlink_callback *cb)
155 struct netlink_callback *cb)
156{ 154{
157 struct dump_phy_data data = { 155 struct dump_phy_data data = {
158 .cb = cb, 156 .cb = cb,
@@ -170,8 +168,7 @@ static int ieee802154_dump_phy(struct sk_buff *skb,
170 return skb->len; 168 return skb->len;
171} 169}
172 170
173static int ieee802154_add_iface(struct sk_buff *skb, 171int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
174 struct genl_info *info)
175{ 172{
176 struct sk_buff *msg; 173 struct sk_buff *msg;
177 struct wpan_phy *phy; 174 struct wpan_phy *phy;
@@ -273,8 +270,7 @@ out_dev:
273 return rc; 270 return rc;
274} 271}
275 272
276static int ieee802154_del_iface(struct sk_buff *skb, 273int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info)
277 struct genl_info *info)
278{ 274{
279 struct sk_buff *msg; 275 struct sk_buff *msg;
280 struct wpan_phy *phy; 276 struct wpan_phy *phy;
@@ -356,28 +352,3 @@ out_dev:
356 352
357 return rc; 353 return rc;
358} 354}
359
360static struct genl_ops ieee802154_phy_ops[] = {
361 IEEE802154_DUMP(IEEE802154_LIST_PHY, ieee802154_list_phy,
362 ieee802154_dump_phy),
363 IEEE802154_OP(IEEE802154_ADD_IFACE, ieee802154_add_iface),
364 IEEE802154_OP(IEEE802154_DEL_IFACE, ieee802154_del_iface),
365};
366
367/*
368 * No need to unregister as family unregistration will do it.
369 */
370int nl802154_phy_register(void)
371{
372 int i;
373 int rc;
374
375 for (i = 0; i < ARRAY_SIZE(ieee802154_phy_ops); i++) {
376 rc = genl_register_ops(&nl802154_family,
377 &ieee802154_phy_ops[i]);
378 if (rc)
379 return rc;
380 }
381
382 return 0;
383}
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index b28e863fe0a7..19e36376d2a0 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -57,7 +57,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
57 if (IS_ERR(rt)) { 57 if (IS_ERR(rt)) {
58 err = PTR_ERR(rt); 58 err = PTR_ERR(rt);
59 if (err == -ENETUNREACH) 59 if (err == -ENETUNREACH)
60 IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); 60 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
61 goto out; 61 goto out;
62 } 62 }
63 63
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 523be38e37de..f2e15738534d 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -104,7 +104,10 @@ errout:
104static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg) 104static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
105{ 105{
106 struct fib_result *result = (struct fib_result *) arg->result; 106 struct fib_result *result = (struct fib_result *) arg->result;
107 struct net_device *dev = result->fi->fib_dev; 107 struct net_device *dev = NULL;
108
109 if (result->fi)
110 dev = result->fi->fib_dev;
108 111
109 /* do not accept result if the route does 112 /* do not accept result if the route does
110 * not meet the required prefix length 113 * not meet the required prefix length
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 3f858266fa7e..ddf32a6bc415 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -386,7 +386,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
386/* 386/*
387 * Handle MSG_ERRQUEUE 387 * Handle MSG_ERRQUEUE
388 */ 388 */
389int ip_recv_error(struct sock *sk, struct msghdr *msg, int len) 389int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
390{ 390{
391 struct sock_exterr_skb *serr; 391 struct sock_exterr_skb *serr;
392 struct sk_buff *skb, *skb2; 392 struct sk_buff *skb, *skb2;
@@ -423,6 +423,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
423 serr->addr_offset); 423 serr->addr_offset);
424 sin->sin_port = serr->port; 424 sin->sin_port = serr->port;
425 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); 425 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
426 *addr_len = sizeof(*sin);
426 } 427 }
427 428
428 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); 429 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index caf01176a5e4..90ff9570d7d4 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -454,6 +454,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
454 tstats->rx_bytes += skb->len; 454 tstats->rx_bytes += skb->len;
455 u64_stats_update_end(&tstats->syncp); 455 u64_stats_update_end(&tstats->syncp);
456 456
457 skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
458
457 if (tunnel->dev->type == ARPHRD_ETHER) { 459 if (tunnel->dev->type == ARPHRD_ETHER) {
458 skb->protocol = eth_type_trans(skb, tunnel->dev); 460 skb->protocol = eth_type_trans(skb, tunnel->dev);
459 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 461 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
@@ -461,8 +463,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
461 skb->dev = tunnel->dev; 463 skb->dev = tunnel->dev;
462 } 464 }
463 465
464 skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
465
466 gro_cells_receive(&tunnel->gro_cells, skb); 466 gro_cells_receive(&tunnel->gro_cells, skb);
467 return 0; 467 return 0;
468 468
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 5d9c845d288a..52b802a0cd8c 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -126,6 +126,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
126 if (!rt->dst.xfrm || 126 if (!rt->dst.xfrm ||
127 rt->dst.xfrm->props.mode != XFRM_MODE_TUNNEL) { 127 rt->dst.xfrm->props.mode != XFRM_MODE_TUNNEL) {
128 dev->stats.tx_carrier_errors++; 128 dev->stats.tx_carrier_errors++;
129 ip_rt_put(rt);
129 goto tx_error_icmp; 130 goto tx_error_icmp;
130 } 131 }
131 tdev = rt->dst.dev; 132 tdev = rt->dst.dev;
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index 01cffeaa0085..f13bd91d9a56 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -244,6 +244,7 @@ synproxy_recv_client_ack(const struct synproxy_net *snet,
244 244
245 this_cpu_inc(snet->stats->cookie_valid); 245 this_cpu_inc(snet->stats->cookie_valid);
246 opts->mss = mss; 246 opts->mss = mss;
247 opts->options |= XT_SYNPROXY_OPT_MSS;
247 248
248 if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP) 249 if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP)
249 synproxy_check_timestamp_cookie(opts); 250 synproxy_check_timestamp_cookie(opts);
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index cbc85f660d54..242e7f4ed6f4 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -772,7 +772,7 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
772 err = PTR_ERR(rt); 772 err = PTR_ERR(rt);
773 rt = NULL; 773 rt = NULL;
774 if (err == -ENETUNREACH) 774 if (err == -ENETUNREACH)
775 IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); 775 IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
776 goto out; 776 goto out;
777 } 777 }
778 778
@@ -830,8 +830,6 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
830{ 830{
831 struct inet_sock *isk = inet_sk(sk); 831 struct inet_sock *isk = inet_sk(sk);
832 int family = sk->sk_family; 832 int family = sk->sk_family;
833 struct sockaddr_in *sin;
834 struct sockaddr_in6 *sin6;
835 struct sk_buff *skb; 833 struct sk_buff *skb;
836 int copied, err; 834 int copied, err;
837 835
@@ -841,19 +839,13 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
841 if (flags & MSG_OOB) 839 if (flags & MSG_OOB)
842 goto out; 840 goto out;
843 841
844 if (addr_len) {
845 if (family == AF_INET)
846 *addr_len = sizeof(*sin);
847 else if (family == AF_INET6 && addr_len)
848 *addr_len = sizeof(*sin6);
849 }
850
851 if (flags & MSG_ERRQUEUE) { 842 if (flags & MSG_ERRQUEUE) {
852 if (family == AF_INET) { 843 if (family == AF_INET) {
853 return ip_recv_error(sk, msg, len); 844 return ip_recv_error(sk, msg, len, addr_len);
854#if IS_ENABLED(CONFIG_IPV6) 845#if IS_ENABLED(CONFIG_IPV6)
855 } else if (family == AF_INET6) { 846 } else if (family == AF_INET6) {
856 return pingv6_ops.ipv6_recv_error(sk, msg, len); 847 return pingv6_ops.ipv6_recv_error(sk, msg, len,
848 addr_len);
857#endif 849#endif
858 } 850 }
859 } 851 }
@@ -877,11 +869,15 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
877 869
878 /* Copy the address and add cmsg data. */ 870 /* Copy the address and add cmsg data. */
879 if (family == AF_INET) { 871 if (family == AF_INET) {
880 sin = (struct sockaddr_in *) msg->msg_name; 872 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
881 sin->sin_family = AF_INET; 873
882 sin->sin_port = 0 /* skb->h.uh->source */; 874 if (sin) {
883 sin->sin_addr.s_addr = ip_hdr(skb)->saddr; 875 sin->sin_family = AF_INET;
884 memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); 876 sin->sin_port = 0 /* skb->h.uh->source */;
877 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
878 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
879 *addr_len = sizeof(*sin);
880 }
885 881
886 if (isk->cmsg_flags) 882 if (isk->cmsg_flags)
887 ip_cmsg_recv(msg, skb); 883 ip_cmsg_recv(msg, skb);
@@ -890,17 +886,21 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
890 } else if (family == AF_INET6) { 886 } else if (family == AF_INET6) {
891 struct ipv6_pinfo *np = inet6_sk(sk); 887 struct ipv6_pinfo *np = inet6_sk(sk);
892 struct ipv6hdr *ip6 = ipv6_hdr(skb); 888 struct ipv6hdr *ip6 = ipv6_hdr(skb);
893 sin6 = (struct sockaddr_in6 *) msg->msg_name; 889 struct sockaddr_in6 *sin6 =
894 sin6->sin6_family = AF_INET6; 890 (struct sockaddr_in6 *)msg->msg_name;
895 sin6->sin6_port = 0; 891
896 sin6->sin6_addr = ip6->saddr; 892 if (sin6) {
897 893 sin6->sin6_family = AF_INET6;
898 sin6->sin6_flowinfo = 0; 894 sin6->sin6_port = 0;
899 if (np->sndflow) 895 sin6->sin6_addr = ip6->saddr;
900 sin6->sin6_flowinfo = ip6_flowinfo(ip6); 896 sin6->sin6_flowinfo = 0;
901 897 if (np->sndflow)
902 sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr, 898 sin6->sin6_flowinfo = ip6_flowinfo(ip6);
903 IP6CB(skb)->iif); 899 sin6->sin6_scope_id =
900 ipv6_iface_scope_id(&sin6->sin6_addr,
901 IP6CB(skb)->iif);
902 *addr_len = sizeof(*sin6);
903 }
904 904
905 if (inet6_sk(sk)->rxopt.all) 905 if (inet6_sk(sk)->rxopt.all)
906 pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb); 906 pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb);
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index ce848461acbb..46d6a1c923a8 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -31,10 +31,6 @@
31const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly; 31const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
32const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly; 32const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly;
33 33
34/*
35 * Add a protocol handler to the hash tables
36 */
37
38int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol) 34int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
39{ 35{
40 if (!prot->netns_ok) { 36 if (!prot->netns_ok) {
@@ -55,10 +51,6 @@ int inet_add_offload(const struct net_offload *prot, unsigned char protocol)
55} 51}
56EXPORT_SYMBOL(inet_add_offload); 52EXPORT_SYMBOL(inet_add_offload);
57 53
58/*
59 * Remove a protocol from the hash tables.
60 */
61
62int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol) 54int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
63{ 55{
64 int ret; 56 int ret;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 41e1d2845c8f..23c3e5b5bb53 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -696,11 +696,8 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
696 if (flags & MSG_OOB) 696 if (flags & MSG_OOB)
697 goto out; 697 goto out;
698 698
699 if (addr_len)
700 *addr_len = sizeof(*sin);
701
702 if (flags & MSG_ERRQUEUE) { 699 if (flags & MSG_ERRQUEUE) {
703 err = ip_recv_error(sk, msg, len); 700 err = ip_recv_error(sk, msg, len, addr_len);
704 goto out; 701 goto out;
705 } 702 }
706 703
@@ -726,6 +723,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
726 sin->sin_addr.s_addr = ip_hdr(skb)->saddr; 723 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
727 sin->sin_port = 0; 724 sin->sin_port = 0;
728 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); 725 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
726 *addr_len = sizeof(*sin);
729 } 727 }
730 if (inet->cmsg_flags) 728 if (inet->cmsg_flags)
731 ip_cmsg_recv(msg, skb); 729 ip_cmsg_recv(msg, skb);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f428935c50db..f8da28278014 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1776,8 +1776,12 @@ local_input:
1776 rth->dst.error= -err; 1776 rth->dst.error= -err;
1777 rth->rt_flags &= ~RTCF_LOCAL; 1777 rth->rt_flags &= ~RTCF_LOCAL;
1778 } 1778 }
1779 if (do_cache) 1779 if (do_cache) {
1780 rt_cache_route(&FIB_RES_NH(res), rth); 1780 if (unlikely(!rt_cache_route(&FIB_RES_NH(res), rth))) {
1781 rth->dst.flags |= DST_NOCACHE;
1782 rt_add_uncached_list(rth);
1783 }
1784 }
1781 skb_dst_set(skb, &rth->dst); 1785 skb_dst_set(skb, &rth->dst);
1782 err = 0; 1786 err = 0;
1783 goto out; 1787 goto out;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 8e8529d3c8c9..c4638e6f0238 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -808,12 +808,6 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
808 xmit_size_goal = min_t(u32, gso_size, 808 xmit_size_goal = min_t(u32, gso_size,
809 sk->sk_gso_max_size - 1 - hlen); 809 sk->sk_gso_max_size - 1 - hlen);
810 810
811 /* TSQ : try to have at least two segments in flight
812 * (one in NIC TX ring, another in Qdisc)
813 */
814 xmit_size_goal = min_t(u32, xmit_size_goal,
815 sysctl_tcp_limit_output_bytes >> 1);
816
817 xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal); 811 xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
818 812
819 /* We try hard to avoid divides here */ 813 /* We try hard to avoid divides here */
@@ -1431,7 +1425,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
1431 do { 1425 do {
1432 if (dma_async_is_tx_complete(tp->ucopy.dma_chan, 1426 if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
1433 last_issued, &done, 1427 last_issued, &done,
1434 &used) == DMA_SUCCESS) { 1428 &used) == DMA_COMPLETE) {
1435 /* Safe to free early-copied skbs now */ 1429 /* Safe to free early-copied skbs now */
1436 __skb_queue_purge(&sk->sk_async_wait_queue); 1430 __skb_queue_purge(&sk->sk_async_wait_queue);
1437 break; 1431 break;
@@ -1439,7 +1433,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
1439 struct sk_buff *skb; 1433 struct sk_buff *skb;
1440 while ((skb = skb_peek(&sk->sk_async_wait_queue)) && 1434 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1441 (dma_async_is_complete(skb->dma_cookie, done, 1435 (dma_async_is_complete(skb->dma_cookie, done,
1442 used) == DMA_SUCCESS)) { 1436 used) == DMA_COMPLETE)) {
1443 __skb_dequeue(&sk->sk_async_wait_queue); 1437 __skb_dequeue(&sk->sk_async_wait_queue);
1444 kfree_skb(skb); 1438 kfree_skb(skb);
1445 } 1439 }
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 59a6f8b90cd9..067213924751 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -177,7 +177,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
177 if (IS_ERR(rt)) { 177 if (IS_ERR(rt)) {
178 err = PTR_ERR(rt); 178 err = PTR_ERR(rt);
179 if (err == -ENETUNREACH) 179 if (err == -ENETUNREACH)
180 IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); 180 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
181 return err; 181 return err;
182 } 182 }
183 183
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 03e9154f7e68..f7e522c558ba 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -6,13 +6,6 @@
6#include <linux/memcontrol.h> 6#include <linux/memcontrol.h>
7#include <linux/module.h> 7#include <linux/module.h>
8 8
9static void memcg_tcp_enter_memory_pressure(struct sock *sk)
10{
11 if (sk->sk_cgrp->memory_pressure)
12 sk->sk_cgrp->memory_pressure = 1;
13}
14EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
15
16int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 9int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
17{ 10{
18 /* 11 /*
@@ -60,7 +53,6 @@ EXPORT_SYMBOL(tcp_destroy_cgroup);
60static int tcp_update_limit(struct mem_cgroup *memcg, u64 val) 53static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
61{ 54{
62 struct cg_proto *cg_proto; 55 struct cg_proto *cg_proto;
63 u64 old_lim;
64 int i; 56 int i;
65 int ret; 57 int ret;
66 58
@@ -71,7 +63,6 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
71 if (val > RES_COUNTER_MAX) 63 if (val > RES_COUNTER_MAX)
72 val = RES_COUNTER_MAX; 64 val = RES_COUNTER_MAX;
73 65
74 old_lim = res_counter_read_u64(&cg_proto->memory_allocated, RES_LIMIT);
75 ret = res_counter_set_limit(&cg_proto->memory_allocated, val); 66 ret = res_counter_set_limit(&cg_proto->memory_allocated, val);
76 if (ret) 67 if (ret)
77 return ret; 68 return ret;
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 2ab09cbae74d..06493736fbc8 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -663,10 +663,13 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
663void tcp_fastopen_cache_set(struct sock *sk, u16 mss, 663void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
664 struct tcp_fastopen_cookie *cookie, bool syn_lost) 664 struct tcp_fastopen_cookie *cookie, bool syn_lost)
665{ 665{
666 struct dst_entry *dst = __sk_dst_get(sk);
666 struct tcp_metrics_block *tm; 667 struct tcp_metrics_block *tm;
667 668
669 if (!dst)
670 return;
668 rcu_read_lock(); 671 rcu_read_lock();
669 tm = tcp_get_metrics(sk, __sk_dst_get(sk), true); 672 tm = tcp_get_metrics(sk, dst, true);
670 if (tm) { 673 if (tm) {
671 struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen; 674 struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
672 675
@@ -988,7 +991,7 @@ static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
988 return 0; 991 return 0;
989} 992}
990 993
991static struct genl_ops tcp_metrics_nl_ops[] = { 994static const struct genl_ops tcp_metrics_nl_ops[] = {
992 { 995 {
993 .cmd = TCP_METRICS_CMD_GET, 996 .cmd = TCP_METRICS_CMD_GET,
994 .doit = tcp_metrics_nl_cmd_get, 997 .doit = tcp_metrics_nl_cmd_get,
@@ -1079,8 +1082,7 @@ void __init tcp_metrics_init(void)
1079 if (ret < 0) 1082 if (ret < 0)
1080 goto cleanup; 1083 goto cleanup;
1081 ret = genl_register_family_with_ops(&tcp_metrics_nl_family, 1084 ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
1082 tcp_metrics_nl_ops, 1085 tcp_metrics_nl_ops);
1083 ARRAY_SIZE(tcp_metrics_nl_ops));
1084 if (ret < 0) 1086 if (ret < 0)
1085 goto cleanup_subsys; 1087 goto cleanup_subsys;
1086 return; 1088 return;
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index a2b68a108eae..05606353c7e7 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -274,33 +274,32 @@ static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *
274{ 274{
275 const struct iphdr *iph = skb_gro_network_header(skb); 275 const struct iphdr *iph = skb_gro_network_header(skb);
276 __wsum wsum; 276 __wsum wsum;
277 __sum16 sum; 277
278 /* Don't bother verifying checksum if we're going to flush anyway. */
279 if (NAPI_GRO_CB(skb)->flush)
280 goto skip_csum;
281
282 wsum = skb->csum;
278 283
279 switch (skb->ip_summed) { 284 switch (skb->ip_summed) {
285 case CHECKSUM_NONE:
286 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
287 0);
288
289 /* fall through */
290
280 case CHECKSUM_COMPLETE: 291 case CHECKSUM_COMPLETE:
281 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr, 292 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
282 skb->csum)) { 293 wsum)) {
283 skb->ip_summed = CHECKSUM_UNNECESSARY; 294 skb->ip_summed = CHECKSUM_UNNECESSARY;
284 break; 295 break;
285 } 296 }
286flush: 297
287 NAPI_GRO_CB(skb)->flush = 1; 298 NAPI_GRO_CB(skb)->flush = 1;
288 return NULL; 299 return NULL;
289
290 case CHECKSUM_NONE:
291 wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
292 skb_gro_len(skb), IPPROTO_TCP, 0);
293 sum = csum_fold(skb_checksum(skb,
294 skb_gro_offset(skb),
295 skb_gro_len(skb),
296 wsum));
297 if (sum)
298 goto flush;
299
300 skb->ip_summed = CHECKSUM_UNNECESSARY;
301 break;
302 } 300 }
303 301
302skip_csum:
304 return tcp_gro_receive(head, skb); 303 return tcp_gro_receive(head, skb);
305} 304}
306 305
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 672854664ff5..7820f3a7dd70 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1875,8 +1875,12 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1875 * - better RTT estimation and ACK scheduling 1875 * - better RTT estimation and ACK scheduling
1876 * - faster recovery 1876 * - faster recovery
1877 * - high rates 1877 * - high rates
1878 * Alas, some drivers / subsystems require a fair amount
1879 * of queued bytes to ensure line rate.
1880 * One example is wifi aggregation (802.11 AMPDU)
1878 */ 1881 */
1879 limit = max(skb->truesize, sk->sk_pacing_rate >> 10); 1882 limit = max_t(unsigned int, sysctl_tcp_limit_output_bytes,
1883 sk->sk_pacing_rate >> 10);
1880 1884
1881 if (atomic_read(&sk->sk_wmem_alloc) > limit) { 1885 if (atomic_read(&sk->sk_wmem_alloc) > limit) {
1882 set_bit(TSQ_THROTTLED, &tp->tsq_flags); 1886 set_bit(TSQ_THROTTLED, &tp->tsq_flags);
@@ -3093,7 +3097,6 @@ void tcp_send_window_probe(struct sock *sk)
3093{ 3097{
3094 if (sk->sk_state == TCP_ESTABLISHED) { 3098 if (sk->sk_state == TCP_ESTABLISHED) {
3095 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; 3099 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
3096 tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq;
3097 tcp_xmit_probe_skb(sk, 0); 3100 tcp_xmit_probe_skb(sk, 0);
3098 } 3101 }
3099} 3102}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index de86e5bc4462..62c19fdd102d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -560,15 +560,11 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
560 __be16 sport, __be16 dport, 560 __be16 sport, __be16 dport,
561 struct udp_table *udptable) 561 struct udp_table *udptable)
562{ 562{
563 struct sock *sk;
564 const struct iphdr *iph = ip_hdr(skb); 563 const struct iphdr *iph = ip_hdr(skb);
565 564
566 if (unlikely(sk = skb_steal_sock(skb))) 565 return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport,
567 return sk; 566 iph->daddr, dport, inet_iif(skb),
568 else 567 udptable);
569 return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport,
570 iph->daddr, dport, inet_iif(skb),
571 udptable);
572} 568}
573 569
574struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, 570struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
@@ -999,7 +995,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
999 err = PTR_ERR(rt); 995 err = PTR_ERR(rt);
1000 rt = NULL; 996 rt = NULL;
1001 if (err == -ENETUNREACH) 997 if (err == -ENETUNREACH)
1002 IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); 998 IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
1003 goto out; 999 goto out;
1004 } 1000 }
1005 1001
@@ -1098,6 +1094,9 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
1098 struct udp_sock *up = udp_sk(sk); 1094 struct udp_sock *up = udp_sk(sk);
1099 int ret; 1095 int ret;
1100 1096
1097 if (flags & MSG_SENDPAGE_NOTLAST)
1098 flags |= MSG_MORE;
1099
1101 if (!up->pending) { 1100 if (!up->pending) {
1102 struct msghdr msg = { .msg_flags = flags|MSG_MORE }; 1101 struct msghdr msg = { .msg_flags = flags|MSG_MORE };
1103 1102
@@ -1235,14 +1234,8 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1235 int is_udplite = IS_UDPLITE(sk); 1234 int is_udplite = IS_UDPLITE(sk);
1236 bool slow; 1235 bool slow;
1237 1236
1238 /*
1239 * Check any passed addresses
1240 */
1241 if (addr_len)
1242 *addr_len = sizeof(*sin);
1243
1244 if (flags & MSG_ERRQUEUE) 1237 if (flags & MSG_ERRQUEUE)
1245 return ip_recv_error(sk, msg, len); 1238 return ip_recv_error(sk, msg, len, addr_len);
1246 1239
1247try_again: 1240try_again:
1248 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), 1241 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
@@ -1302,6 +1295,7 @@ try_again:
1302 sin->sin_port = udp_hdr(skb)->source; 1295 sin->sin_port = udp_hdr(skb)->source;
1303 sin->sin_addr.s_addr = ip_hdr(skb)->saddr; 1296 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
1304 memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); 1297 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
1298 *addr_len = sizeof(*sin);
1305 } 1299 }
1306 if (inet->cmsg_flags) 1300 if (inet->cmsg_flags)
1307 ip_cmsg_recv(msg, skb); 1301 ip_cmsg_recv(msg, skb);
@@ -1605,12 +1599,21 @@ static void flush_stack(struct sock **stack, unsigned int count,
1605 kfree_skb(skb1); 1599 kfree_skb(skb1);
1606} 1600}
1607 1601
1608static void udp_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) 1602/* For TCP sockets, sk_rx_dst is protected by socket lock
1603 * For UDP, we use sk_dst_lock to guard against concurrent changes.
1604 */
1605static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
1609{ 1606{
1610 struct dst_entry *dst = skb_dst(skb); 1607 struct dst_entry *old;
1611 1608
1612 dst_hold(dst); 1609 spin_lock(&sk->sk_dst_lock);
1613 sk->sk_rx_dst = dst; 1610 old = sk->sk_rx_dst;
1611 if (likely(old != dst)) {
1612 dst_hold(dst);
1613 sk->sk_rx_dst = dst;
1614 dst_release(old);
1615 }
1616 spin_unlock(&sk->sk_dst_lock);
1614} 1617}
1615 1618
1616/* 1619/*
@@ -1741,15 +1744,16 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
1741 if (udp4_csum_init(skb, uh, proto)) 1744 if (udp4_csum_init(skb, uh, proto))
1742 goto csum_error; 1745 goto csum_error;
1743 1746
1744 if (skb->sk) { 1747 sk = skb_steal_sock(skb);
1748 if (sk) {
1749 struct dst_entry *dst = skb_dst(skb);
1745 int ret; 1750 int ret;
1746 sk = skb->sk;
1747 1751
1748 if (unlikely(sk->sk_rx_dst == NULL)) 1752 if (unlikely(sk->sk_rx_dst != dst))
1749 udp_sk_rx_dst_set(sk, skb); 1753 udp_sk_rx_dst_set(sk, dst);
1750 1754
1751 ret = udp_queue_rcv_skb(sk, skb); 1755 ret = udp_queue_rcv_skb(sk, skb);
1752 1756 sock_put(sk);
1753 /* a return value > 0 means to resubmit the input, but 1757 /* a return value > 0 means to resubmit the input, but
1754 * it wants the return to be -protocol, or 0 1758 * it wants the return to be -protocol, or 0
1755 */ 1759 */
@@ -1915,17 +1919,20 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
1915 1919
1916void udp_v4_early_demux(struct sk_buff *skb) 1920void udp_v4_early_demux(struct sk_buff *skb)
1917{ 1921{
1918 const struct iphdr *iph = ip_hdr(skb); 1922 struct net *net = dev_net(skb->dev);
1919 const struct udphdr *uh = udp_hdr(skb); 1923 const struct iphdr *iph;
1924 const struct udphdr *uh;
1920 struct sock *sk; 1925 struct sock *sk;
1921 struct dst_entry *dst; 1926 struct dst_entry *dst;
1922 struct net *net = dev_net(skb->dev);
1923 int dif = skb->dev->ifindex; 1927 int dif = skb->dev->ifindex;
1924 1928
1925 /* validate the packet */ 1929 /* validate the packet */
1926 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr))) 1930 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
1927 return; 1931 return;
1928 1932
1933 iph = ip_hdr(skb);
1934 uh = udp_hdr(skb);
1935
1929 if (skb->pkt_type == PACKET_BROADCAST || 1936 if (skb->pkt_type == PACKET_BROADCAST ||
1930 skb->pkt_type == PACKET_MULTICAST) 1937 skb->pkt_type == PACKET_MULTICAST)
1931 sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, 1938 sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 5658d9d51637..d5fa5b8c443e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1996,23 +1996,6 @@ static void addrconf_add_mroute(struct net_device *dev)
1996 ip6_route_add(&cfg); 1996 ip6_route_add(&cfg);
1997} 1997}
1998 1998
1999#if IS_ENABLED(CONFIG_IPV6_SIT)
2000static void sit_route_add(struct net_device *dev)
2001{
2002 struct fib6_config cfg = {
2003 .fc_table = RT6_TABLE_MAIN,
2004 .fc_metric = IP6_RT_PRIO_ADDRCONF,
2005 .fc_ifindex = dev->ifindex,
2006 .fc_dst_len = 96,
2007 .fc_flags = RTF_UP | RTF_NONEXTHOP,
2008 .fc_nlinfo.nl_net = dev_net(dev),
2009 };
2010
2011 /* prefix length - 96 bits "::d.d.d.d" */
2012 ip6_route_add(&cfg);
2013}
2014#endif
2015
2016static struct inet6_dev *addrconf_add_dev(struct net_device *dev) 1999static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2017{ 2000{
2018 struct inet6_dev *idev; 2001 struct inet6_dev *idev;
@@ -2542,7 +2525,8 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
2542 struct in6_addr addr; 2525 struct in6_addr addr;
2543 struct net_device *dev; 2526 struct net_device *dev;
2544 struct net *net = dev_net(idev->dev); 2527 struct net *net = dev_net(idev->dev);
2545 int scope; 2528 int scope, plen;
2529 u32 pflags = 0;
2546 2530
2547 ASSERT_RTNL(); 2531 ASSERT_RTNL();
2548 2532
@@ -2552,12 +2536,16 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
2552 if (idev->dev->flags&IFF_POINTOPOINT) { 2536 if (idev->dev->flags&IFF_POINTOPOINT) {
2553 addr.s6_addr32[0] = htonl(0xfe800000); 2537 addr.s6_addr32[0] = htonl(0xfe800000);
2554 scope = IFA_LINK; 2538 scope = IFA_LINK;
2539 plen = 64;
2555 } else { 2540 } else {
2556 scope = IPV6_ADDR_COMPATv4; 2541 scope = IPV6_ADDR_COMPATv4;
2542 plen = 96;
2543 pflags |= RTF_NONEXTHOP;
2557 } 2544 }
2558 2545
2559 if (addr.s6_addr32[3]) { 2546 if (addr.s6_addr32[3]) {
2560 add_addr(idev, &addr, 128, scope); 2547 add_addr(idev, &addr, plen, scope);
2548 addrconf_prefix_route(&addr, plen, idev->dev, 0, pflags);
2561 return; 2549 return;
2562 } 2550 }
2563 2551
@@ -2569,7 +2557,6 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
2569 int flag = scope; 2557 int flag = scope;
2570 2558
2571 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { 2559 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
2572 int plen;
2573 2560
2574 addr.s6_addr32[3] = ifa->ifa_local; 2561 addr.s6_addr32[3] = ifa->ifa_local;
2575 2562
@@ -2580,12 +2567,10 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
2580 continue; 2567 continue;
2581 flag |= IFA_HOST; 2568 flag |= IFA_HOST;
2582 } 2569 }
2583 if (idev->dev->flags&IFF_POINTOPOINT)
2584 plen = 64;
2585 else
2586 plen = 96;
2587 2570
2588 add_addr(idev, &addr, plen, flag); 2571 add_addr(idev, &addr, plen, flag);
2572 addrconf_prefix_route(&addr, plen, idev->dev, 0,
2573 pflags);
2589 } 2574 }
2590 } 2575 }
2591 } 2576 }
@@ -2628,7 +2613,7 @@ static void init_loopback(struct net_device *dev)
2628 if (sp_ifa->rt) 2613 if (sp_ifa->rt)
2629 continue; 2614 continue;
2630 2615
2631 sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); 2616 sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, false);
2632 2617
2633 /* Failure cases are ignored */ 2618 /* Failure cases are ignored */
2634 if (!IS_ERR(sp_rt)) { 2619 if (!IS_ERR(sp_rt)) {
@@ -2711,7 +2696,6 @@ static void addrconf_sit_config(struct net_device *dev)
2711 struct in6_addr addr; 2696 struct in6_addr addr;
2712 2697
2713 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0); 2698 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
2714 addrconf_prefix_route(&addr, 64, dev, 0, 0);
2715 if (!ipv6_generate_eui64(addr.s6_addr + 8, dev)) 2699 if (!ipv6_generate_eui64(addr.s6_addr + 8, dev))
2716 addrconf_add_linklocal(idev, &addr); 2700 addrconf_add_linklocal(idev, &addr);
2717 return; 2701 return;
@@ -2721,8 +2705,6 @@ static void addrconf_sit_config(struct net_device *dev)
2721 2705
2722 if (dev->flags&IFF_POINTOPOINT) 2706 if (dev->flags&IFF_POINTOPOINT)
2723 addrconf_add_mroute(dev); 2707 addrconf_add_mroute(dev);
2724 else
2725 sit_route_add(dev);
2726} 2708}
2727#endif 2709#endif
2728 2710
@@ -2740,8 +2722,6 @@ static void addrconf_gre_config(struct net_device *dev)
2740 } 2722 }
2741 2723
2742 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0); 2724 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
2743 addrconf_prefix_route(&addr, 64, dev, 0, 0);
2744
2745 if (!ipv6_generate_eui64(addr.s6_addr + 8, dev)) 2725 if (!ipv6_generate_eui64(addr.s6_addr + 8, dev))
2746 addrconf_add_linklocal(idev, &addr); 2726 addrconf_add_linklocal(idev, &addr);
2747} 2727}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index ff75313f27a8..4fbdb7046d28 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -972,10 +972,10 @@ out:
972 972
973#ifdef CONFIG_SYSCTL 973#ifdef CONFIG_SYSCTL
974sysctl_fail: 974sysctl_fail:
975 ipv6_packet_cleanup(); 975 pingv6_exit();
976#endif 976#endif
977pingv6_fail: 977pingv6_fail:
978 pingv6_exit(); 978 ipv6_packet_cleanup();
979ipv6_packet_fail: 979ipv6_packet_fail:
980 tcpv6_exit(); 980 tcpv6_exit();
981tcpv6_fail: 981tcpv6_fail:
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index a454b0ff57c7..93b1aa34c432 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -73,7 +73,6 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
73 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); 73 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
74 if (flowlabel == NULL) 74 if (flowlabel == NULL)
75 return -EINVAL; 75 return -EINVAL;
76 usin->sin6_addr = flowlabel->dst;
77 } 76 }
78 } 77 }
79 78
@@ -318,7 +317,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
318/* 317/*
319 * Handle MSG_ERRQUEUE 318 * Handle MSG_ERRQUEUE
320 */ 319 */
321int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) 320int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
322{ 321{
323 struct ipv6_pinfo *np = inet6_sk(sk); 322 struct ipv6_pinfo *np = inet6_sk(sk);
324 struct sock_exterr_skb *serr; 323 struct sock_exterr_skb *serr;
@@ -369,6 +368,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
369 &sin->sin6_addr); 368 &sin->sin6_addr);
370 sin->sin6_scope_id = 0; 369 sin->sin6_scope_id = 0;
371 } 370 }
371 *addr_len = sizeof(*sin);
372 } 372 }
373 373
374 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); 374 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
@@ -377,6 +377,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
377 if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) { 377 if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
378 sin->sin6_family = AF_INET6; 378 sin->sin6_family = AF_INET6;
379 sin->sin6_flowinfo = 0; 379 sin->sin6_flowinfo = 0;
380 sin->sin6_port = 0;
380 if (skb->protocol == htons(ETH_P_IPV6)) { 381 if (skb->protocol == htons(ETH_P_IPV6)) {
381 sin->sin6_addr = ipv6_hdr(skb)->saddr; 382 sin->sin6_addr = ipv6_hdr(skb)->saddr;
382 if (np->rxopt.all) 383 if (np->rxopt.all)
@@ -423,7 +424,8 @@ EXPORT_SYMBOL_GPL(ipv6_recv_error);
423/* 424/*
424 * Handle IPV6_RECVPATHMTU 425 * Handle IPV6_RECVPATHMTU
425 */ 426 */
426int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len) 427int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
428 int *addr_len)
427{ 429{
428 struct ipv6_pinfo *np = inet6_sk(sk); 430 struct ipv6_pinfo *np = inet6_sk(sk);
429 struct sk_buff *skb; 431 struct sk_buff *skb;
@@ -457,6 +459,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
457 sin->sin6_port = 0; 459 sin->sin6_port = 0;
458 sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id; 460 sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
459 sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr; 461 sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr;
462 *addr_len = sizeof(*sin);
460 } 463 }
461 464
462 put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info); 465 put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index e27591635f92..3fd0a578329e 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -122,7 +122,11 @@ out:
122static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg) 122static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
123{ 123{
124 struct rt6_info *rt = (struct rt6_info *) arg->result; 124 struct rt6_info *rt = (struct rt6_info *) arg->result;
125 struct net_device *dev = rt->rt6i_idev->dev; 125 struct net_device *dev = NULL;
126
127 if (rt->rt6i_idev)
128 dev = rt->rt6i_idev->dev;
129
126 /* do not accept result if the route does 130 /* do not accept result if the route does
127 * not meet the required prefix length 131 * not meet the required prefix length
128 */ 132 */
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 59df872e2f4d..4acdb63495db 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -116,8 +116,8 @@ static int ip6_finish_output2(struct sk_buff *skb)
116 } 116 }
117 rcu_read_unlock_bh(); 117 rcu_read_unlock_bh();
118 118
119 IP6_INC_STATS_BH(dev_net(dst->dev), 119 IP6_INC_STATS(dev_net(dst->dev),
120 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); 120 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
121 kfree_skb(skb); 121 kfree_skb(skb);
122 return -EINVAL; 122 return -EINVAL;
123} 123}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index df1fa58528c6..d6062325db08 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1642,6 +1642,15 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
1642 return ip6_tnl_update(t, &p); 1642 return ip6_tnl_update(t, &p);
1643} 1643}
1644 1644
1645static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
1646{
1647 struct net *net = dev_net(dev);
1648 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1649
1650 if (dev != ip6n->fb_tnl_dev)
1651 unregister_netdevice_queue(dev, head);
1652}
1653
1645static size_t ip6_tnl_get_size(const struct net_device *dev) 1654static size_t ip6_tnl_get_size(const struct net_device *dev)
1646{ 1655{
1647 return 1656 return
@@ -1706,6 +1715,7 @@ static struct rtnl_link_ops ip6_link_ops __read_mostly = {
1706 .validate = ip6_tnl_validate, 1715 .validate = ip6_tnl_validate,
1707 .newlink = ip6_tnl_newlink, 1716 .newlink = ip6_tnl_newlink,
1708 .changelink = ip6_tnl_changelink, 1717 .changelink = ip6_tnl_changelink,
1718 .dellink = ip6_tnl_dellink,
1709 .get_size = ip6_tnl_get_size, 1719 .get_size = ip6_tnl_get_size,
1710 .fill_info = ip6_tnl_fill_info, 1720 .fill_info = ip6_tnl_fill_info,
1711}; 1721};
@@ -1722,9 +1732,9 @@ static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
1722 .priority = 1, 1732 .priority = 1,
1723}; 1733};
1724 1734
1725static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n) 1735static void __net_exit ip6_tnl_destroy_tunnels(struct net *net)
1726{ 1736{
1727 struct net *net = dev_net(ip6n->fb_tnl_dev); 1737 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1728 struct net_device *dev, *aux; 1738 struct net_device *dev, *aux;
1729 int h; 1739 int h;
1730 struct ip6_tnl *t; 1740 struct ip6_tnl *t;
@@ -1792,10 +1802,8 @@ err_alloc_dev:
1792 1802
1793static void __net_exit ip6_tnl_exit_net(struct net *net) 1803static void __net_exit ip6_tnl_exit_net(struct net *net)
1794{ 1804{
1795 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1796
1797 rtnl_lock(); 1805 rtnl_lock();
1798 ip6_tnl_destroy_tunnels(ip6n); 1806 ip6_tnl_destroy_tunnels(net);
1799 rtnl_unlock(); 1807 rtnl_unlock();
1800} 1808}
1801 1809
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index f8a55ff1971b..300865171394 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1277,6 +1277,9 @@ skip_linkparms:
1277 ri->prefix_len == 0) 1277 ri->prefix_len == 0)
1278 continue; 1278 continue;
1279#endif 1279#endif
1280 if (ri->prefix_len == 0 &&
1281 !in6_dev->cnf.accept_ra_defrtr)
1282 continue;
1280 if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen) 1283 if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
1281 continue; 1284 continue;
1282 rt6_route_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3, 1285 rt6_route_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3,
@@ -1726,8 +1729,8 @@ int __init ndisc_init(void)
1726 &ndisc_ifinfo_sysctl_change); 1729 &ndisc_ifinfo_sysctl_change);
1727 if (err) 1730 if (err)
1728 goto out_unregister_pernet; 1731 goto out_unregister_pernet;
1729#endif
1730out: 1732out:
1733#endif
1731 return err; 1734 return err;
1732 1735
1733#ifdef CONFIG_SYSCTL 1736#ifdef CONFIG_SYSCTL
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index bf9f612c1bc2..f78f41aca8e9 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -259,6 +259,7 @@ synproxy_recv_client_ack(const struct synproxy_net *snet,
259 259
260 this_cpu_inc(snet->stats->cookie_valid); 260 this_cpu_inc(snet->stats->cookie_valid);
261 opts->mss = mss; 261 opts->mss = mss;
262 opts->options |= XT_SYNPROXY_OPT_MSS;
262 263
263 if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP) 264 if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP)
264 synproxy_check_timestamp_cookie(opts); 265 synproxy_check_timestamp_cookie(opts);
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 8815e31a87fe..a83243c3d656 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -57,7 +57,8 @@ static struct inet_protosw pingv6_protosw = {
57 57
58 58
59/* Compatibility glue so we can support IPv6 when it's compiled as a module */ 59/* Compatibility glue so we can support IPv6 when it's compiled as a module */
60static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) 60static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
61 int *addr_len)
61{ 62{
62 return -EAFNOSUPPORT; 63 return -EAFNOSUPPORT;
63} 64}
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 22d1bd4670da..e048cf1bb6a2 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -36,10 +36,6 @@ int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol
36} 36}
37EXPORT_SYMBOL(inet6_add_protocol); 37EXPORT_SYMBOL(inet6_add_protocol);
38 38
39/*
40 * Remove a protocol from the hash tables.
41 */
42
43int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol) 39int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol)
44{ 40{
45 int ret; 41 int ret;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 3c00842b0079..b6bb87e55805 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -465,14 +465,11 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
465 if (flags & MSG_OOB) 465 if (flags & MSG_OOB)
466 return -EOPNOTSUPP; 466 return -EOPNOTSUPP;
467 467
468 if (addr_len)
469 *addr_len=sizeof(*sin6);
470
471 if (flags & MSG_ERRQUEUE) 468 if (flags & MSG_ERRQUEUE)
472 return ipv6_recv_error(sk, msg, len); 469 return ipv6_recv_error(sk, msg, len, addr_len);
473 470
474 if (np->rxpmtu && np->rxopt.bits.rxpmtu) 471 if (np->rxpmtu && np->rxopt.bits.rxpmtu)
475 return ipv6_recv_rxpmtu(sk, msg, len); 472 return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
476 473
477 skb = skb_recv_datagram(sk, flags, noblock, &err); 474 skb = skb_recv_datagram(sk, flags, noblock, &err);
478 if (!skb) 475 if (!skb)
@@ -506,6 +503,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
506 sin6->sin6_flowinfo = 0; 503 sin6->sin6_flowinfo = 0;
507 sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr, 504 sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
508 IP6CB(skb)->iif); 505 IP6CB(skb)->iif);
506 *addr_len = sizeof(*sin6);
509 } 507 }
510 508
511 sock_recv_ts_and_drops(msg, sk, skb); 509 sock_recv_ts_and_drops(msg, sk, skb);
@@ -794,7 +792,6 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
794 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); 792 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
795 if (flowlabel == NULL) 793 if (flowlabel == NULL)
796 return -EINVAL; 794 return -EINVAL;
797 daddr = &flowlabel->dst;
798 } 795 }
799 } 796 }
800 797
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7faa9d5e1503..a0a48ac3403f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -84,6 +84,8 @@ static int ip6_dst_gc(struct dst_ops *ops);
84 84
85static int ip6_pkt_discard(struct sk_buff *skb); 85static int ip6_pkt_discard(struct sk_buff *skb);
86static int ip6_pkt_discard_out(struct sk_buff *skb); 86static int ip6_pkt_discard_out(struct sk_buff *skb);
87static int ip6_pkt_prohibit(struct sk_buff *skb);
88static int ip6_pkt_prohibit_out(struct sk_buff *skb);
87static void ip6_link_failure(struct sk_buff *skb); 89static void ip6_link_failure(struct sk_buff *skb);
88static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, 90static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
89 struct sk_buff *skb, u32 mtu); 91 struct sk_buff *skb, u32 mtu);
@@ -234,9 +236,6 @@ static const struct rt6_info ip6_null_entry_template = {
234 236
235#ifdef CONFIG_IPV6_MULTIPLE_TABLES 237#ifdef CONFIG_IPV6_MULTIPLE_TABLES
236 238
237static int ip6_pkt_prohibit(struct sk_buff *skb);
238static int ip6_pkt_prohibit_out(struct sk_buff *skb);
239
240static const struct rt6_info ip6_prohibit_entry_template = { 239static const struct rt6_info ip6_prohibit_entry_template = {
241 .dst = { 240 .dst = {
242 .__refcnt = ATOMIC_INIT(1), 241 .__refcnt = ATOMIC_INIT(1),
@@ -1565,21 +1564,24 @@ int ip6_route_add(struct fib6_config *cfg)
1565 goto out; 1564 goto out;
1566 } 1565 }
1567 } 1566 }
1568 rt->dst.output = ip6_pkt_discard_out;
1569 rt->dst.input = ip6_pkt_discard;
1570 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP; 1567 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1571 switch (cfg->fc_type) { 1568 switch (cfg->fc_type) {
1572 case RTN_BLACKHOLE: 1569 case RTN_BLACKHOLE:
1573 rt->dst.error = -EINVAL; 1570 rt->dst.error = -EINVAL;
1571 rt->dst.output = dst_discard;
1572 rt->dst.input = dst_discard;
1574 break; 1573 break;
1575 case RTN_PROHIBIT: 1574 case RTN_PROHIBIT:
1576 rt->dst.error = -EACCES; 1575 rt->dst.error = -EACCES;
1576 rt->dst.output = ip6_pkt_prohibit_out;
1577 rt->dst.input = ip6_pkt_prohibit;
1577 break; 1578 break;
1578 case RTN_THROW: 1579 case RTN_THROW:
1579 rt->dst.error = -EAGAIN;
1580 break;
1581 default: 1580 default:
1582 rt->dst.error = -ENETUNREACH; 1581 rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
1582 : -ENETUNREACH;
1583 rt->dst.output = ip6_pkt_discard_out;
1584 rt->dst.input = ip6_pkt_discard;
1583 break; 1585 break;
1584 } 1586 }
1585 goto install_route; 1587 goto install_route;
@@ -2144,8 +2146,6 @@ static int ip6_pkt_discard_out(struct sk_buff *skb)
2144 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES); 2146 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
2145} 2147}
2146 2148
2147#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2148
2149static int ip6_pkt_prohibit(struct sk_buff *skb) 2149static int ip6_pkt_prohibit(struct sk_buff *skb)
2150{ 2150{
2151 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES); 2151 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
@@ -2157,8 +2157,6 @@ static int ip6_pkt_prohibit_out(struct sk_buff *skb)
2157 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES); 2157 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2158} 2158}
2159 2159
2160#endif
2161
2162/* 2160/*
2163 * Allocate a dst for local (unicast / anycast) address. 2161 * Allocate a dst for local (unicast / anycast) address.
2164 */ 2162 */
@@ -2168,12 +2166,10 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2168 bool anycast) 2166 bool anycast)
2169{ 2167{
2170 struct net *net = dev_net(idev->dev); 2168 struct net *net = dev_net(idev->dev);
2171 struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 0, NULL); 2169 struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
2172 2170 DST_NOCOUNT, NULL);
2173 if (!rt) { 2171 if (!rt)
2174 net_warn_ratelimited("Maximum number of routes reached, consider increasing route/max_size\n");
2175 return ERR_PTR(-ENOMEM); 2172 return ERR_PTR(-ENOMEM);
2176 }
2177 2173
2178 in6_dev_hold(idev); 2174 in6_dev_hold(idev);
2179 2175
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index bfc6fcea3841..366fbba3359a 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -478,14 +478,44 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
478 dev_put(dev); 478 dev_put(dev);
479} 479}
480 480
481/* Generate icmpv6 with type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH
482 * if sufficient data bytes are available
483 */
484static int ipip6_err_gen_icmpv6_unreach(struct sk_buff *skb)
485{
486 const struct iphdr *iph = (const struct iphdr *) skb->data;
487 struct rt6_info *rt;
488 struct sk_buff *skb2;
489
490 if (!pskb_may_pull(skb, iph->ihl * 4 + sizeof(struct ipv6hdr) + 8))
491 return 1;
492
493 skb2 = skb_clone(skb, GFP_ATOMIC);
494
495 if (!skb2)
496 return 1;
497
498 skb_dst_drop(skb2);
499 skb_pull(skb2, iph->ihl * 4);
500 skb_reset_network_header(skb2);
501
502 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0);
503
504 if (rt && rt->dst.dev)
505 skb2->dev = rt->dst.dev;
506
507 icmpv6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
508
509 if (rt)
510 ip6_rt_put(rt);
511
512 kfree_skb(skb2);
513
514 return 0;
515}
481 516
482static int ipip6_err(struct sk_buff *skb, u32 info) 517static int ipip6_err(struct sk_buff *skb, u32 info)
483{ 518{
484
485/* All the routers (except for Linux) return only
486 8 bytes of packet payload. It means, that precise relaying of
487 ICMP in the real Internet is absolutely infeasible.
488 */
489 const struct iphdr *iph = (const struct iphdr *)skb->data; 519 const struct iphdr *iph = (const struct iphdr *)skb->data;
490 const int type = icmp_hdr(skb)->type; 520 const int type = icmp_hdr(skb)->type;
491 const int code = icmp_hdr(skb)->code; 521 const int code = icmp_hdr(skb)->code;
@@ -500,7 +530,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
500 case ICMP_DEST_UNREACH: 530 case ICMP_DEST_UNREACH:
501 switch (code) { 531 switch (code) {
502 case ICMP_SR_FAILED: 532 case ICMP_SR_FAILED:
503 case ICMP_PORT_UNREACH:
504 /* Impossible event. */ 533 /* Impossible event. */
505 return 0; 534 return 0;
506 default: 535 default:
@@ -545,6 +574,9 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
545 goto out; 574 goto out;
546 575
547 err = 0; 576 err = 0;
577 if (!ipip6_err_gen_icmpv6_unreach(skb))
578 goto out;
579
548 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) 580 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
549 goto out; 581 goto out;
550 582
@@ -919,7 +951,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
919 if (!new_skb) { 951 if (!new_skb) {
920 ip_rt_put(rt); 952 ip_rt_put(rt);
921 dev->stats.tx_dropped++; 953 dev->stats.tx_dropped++;
922 dev_kfree_skb(skb); 954 kfree_skb(skb);
923 return NETDEV_TX_OK; 955 return NETDEV_TX_OK;
924 } 956 }
925 if (skb->sk) 957 if (skb->sk)
@@ -945,7 +977,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
945tx_error_icmp: 977tx_error_icmp:
946 dst_link_failure(skb); 978 dst_link_failure(skb);
947tx_error: 979tx_error:
948 dev_kfree_skb(skb); 980 kfree_skb(skb);
949out: 981out:
950 dev->stats.tx_errors++; 982 dev->stats.tx_errors++;
951 return NETDEV_TX_OK; 983 return NETDEV_TX_OK;
@@ -985,7 +1017,7 @@ static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
985 1017
986tx_err: 1018tx_err:
987 dev->stats.tx_errors++; 1019 dev->stats.tx_errors++;
988 dev_kfree_skb(skb); 1020 kfree_skb(skb);
989 return NETDEV_TX_OK; 1021 return NETDEV_TX_OK;
990 1022
991} 1023}
@@ -1619,6 +1651,15 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
1619#endif 1651#endif
1620}; 1652};
1621 1653
1654static void ipip6_dellink(struct net_device *dev, struct list_head *head)
1655{
1656 struct net *net = dev_net(dev);
1657 struct sit_net *sitn = net_generic(net, sit_net_id);
1658
1659 if (dev != sitn->fb_tunnel_dev)
1660 unregister_netdevice_queue(dev, head);
1661}
1662
1622static struct rtnl_link_ops sit_link_ops __read_mostly = { 1663static struct rtnl_link_ops sit_link_ops __read_mostly = {
1623 .kind = "sit", 1664 .kind = "sit",
1624 .maxtype = IFLA_IPTUN_MAX, 1665 .maxtype = IFLA_IPTUN_MAX,
@@ -1630,6 +1671,7 @@ static struct rtnl_link_ops sit_link_ops __read_mostly = {
1630 .changelink = ipip6_changelink, 1671 .changelink = ipip6_changelink,
1631 .get_size = ipip6_get_size, 1672 .get_size = ipip6_get_size,
1632 .fill_info = ipip6_fill_info, 1673 .fill_info = ipip6_fill_info,
1674 .dellink = ipip6_dellink,
1633}; 1675};
1634 1676
1635static struct xfrm_tunnel sit_handler __read_mostly = { 1677static struct xfrm_tunnel sit_handler __read_mostly = {
@@ -1644,9 +1686,10 @@ static struct xfrm_tunnel ipip_handler __read_mostly = {
1644 .priority = 2, 1686 .priority = 2,
1645}; 1687};
1646 1688
1647static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head) 1689static void __net_exit sit_destroy_tunnels(struct net *net,
1690 struct list_head *head)
1648{ 1691{
1649 struct net *net = dev_net(sitn->fb_tunnel_dev); 1692 struct sit_net *sitn = net_generic(net, sit_net_id);
1650 struct net_device *dev, *aux; 1693 struct net_device *dev, *aux;
1651 int prio; 1694 int prio;
1652 1695
@@ -1721,11 +1764,10 @@ err_alloc_dev:
1721 1764
1722static void __net_exit sit_exit_net(struct net *net) 1765static void __net_exit sit_exit_net(struct net *net)
1723{ 1766{
1724 struct sit_net *sitn = net_generic(net, sit_net_id);
1725 LIST_HEAD(list); 1767 LIST_HEAD(list);
1726 1768
1727 rtnl_lock(); 1769 rtnl_lock();
1728 sit_destroy_tunnels(sitn, &list); 1770 sit_destroy_tunnels(net, &list);
1729 unregister_netdevice_many(&list); 1771 unregister_netdevice_many(&list);
1730 rtnl_unlock(); 1772 rtnl_unlock();
1731} 1773}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 0740f93a114a..f67033b4bb66 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -156,7 +156,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
156 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); 156 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
157 if (flowlabel == NULL) 157 if (flowlabel == NULL)
158 return -EINVAL; 158 return -EINVAL;
159 usin->sin6_addr = flowlabel->dst;
160 fl6_sock_release(flowlabel); 159 fl6_sock_release(flowlabel);
161 } 160 }
162 } 161 }
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index c1097c798900..6d18157dc32c 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -37,34 +37,32 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
37{ 37{
38 const struct ipv6hdr *iph = skb_gro_network_header(skb); 38 const struct ipv6hdr *iph = skb_gro_network_header(skb);
39 __wsum wsum; 39 __wsum wsum;
40 __sum16 sum; 40
41 /* Don't bother verifying checksum if we're going to flush anyway. */
42 if (NAPI_GRO_CB(skb)->flush)
43 goto skip_csum;
44
45 wsum = skb->csum;
41 46
42 switch (skb->ip_summed) { 47 switch (skb->ip_summed) {
48 case CHECKSUM_NONE:
49 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
50 wsum);
51
52 /* fall through */
53
43 case CHECKSUM_COMPLETE: 54 case CHECKSUM_COMPLETE:
44 if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr, 55 if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
45 skb->csum)) { 56 wsum)) {
46 skb->ip_summed = CHECKSUM_UNNECESSARY; 57 skb->ip_summed = CHECKSUM_UNNECESSARY;
47 break; 58 break;
48 } 59 }
49flush: 60
50 NAPI_GRO_CB(skb)->flush = 1; 61 NAPI_GRO_CB(skb)->flush = 1;
51 return NULL; 62 return NULL;
52
53 case CHECKSUM_NONE:
54 wsum = ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
55 skb_gro_len(skb),
56 IPPROTO_TCP, 0));
57 sum = csum_fold(skb_checksum(skb,
58 skb_gro_offset(skb),
59 skb_gro_len(skb),
60 wsum));
61 if (sum)
62 goto flush;
63
64 skb->ip_summed = CHECKSUM_UNNECESSARY;
65 break;
66 } 63 }
67 64
65skip_csum:
68 return tcp_gro_receive(head, skb); 66 return tcp_gro_receive(head, skb);
69} 67}
70 68
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f3893e897f72..089c741a3992 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -392,14 +392,11 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
392 int is_udp4; 392 int is_udp4;
393 bool slow; 393 bool slow;
394 394
395 if (addr_len)
396 *addr_len = sizeof(struct sockaddr_in6);
397
398 if (flags & MSG_ERRQUEUE) 395 if (flags & MSG_ERRQUEUE)
399 return ipv6_recv_error(sk, msg, len); 396 return ipv6_recv_error(sk, msg, len, addr_len);
400 397
401 if (np->rxpmtu && np->rxopt.bits.rxpmtu) 398 if (np->rxpmtu && np->rxopt.bits.rxpmtu)
402 return ipv6_recv_rxpmtu(sk, msg, len); 399 return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
403 400
404try_again: 401try_again:
405 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), 402 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
@@ -480,7 +477,7 @@ try_again:
480 ipv6_iface_scope_id(&sin6->sin6_addr, 477 ipv6_iface_scope_id(&sin6->sin6_addr,
481 IP6CB(skb)->iif); 478 IP6CB(skb)->iif);
482 } 479 }
483 480 *addr_len = sizeof(*sin6);
484 } 481 }
485 if (is_udp4) { 482 if (is_udp4) {
486 if (inet->cmsg_flags) 483 if (inet->cmsg_flags)
@@ -1143,7 +1140,6 @@ do_udp_sendmsg:
1143 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); 1140 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
1144 if (flowlabel == NULL) 1141 if (flowlabel == NULL)
1145 return -EINVAL; 1142 return -EINVAL;
1146 daddr = &flowlabel->dst;
1147 } 1143 }
1148 } 1144 }
1149 1145
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 7a1e0fc1bd4d..e096025b477f 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1823,8 +1823,6 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
1823 if (skb->tstamp.tv64) 1823 if (skb->tstamp.tv64)
1824 sk->sk_stamp = skb->tstamp; 1824 sk->sk_stamp = skb->tstamp;
1825 1825
1826 msg->msg_namelen = sizeof(*sipx);
1827
1828 if (sipx) { 1826 if (sipx) {
1829 sipx->sipx_family = AF_IPX; 1827 sipx->sipx_family = AF_IPX;
1830 sipx->sipx_port = ipx->ipx_source.sock; 1828 sipx->sipx_port = ipx->ipx_source.sock;
@@ -1832,6 +1830,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
1832 sipx->sipx_network = IPX_SKB_CB(skb)->ipx_source_net; 1830 sipx->sipx_network = IPX_SKB_CB(skb)->ipx_source_net;
1833 sipx->sipx_type = ipx->ipx_type; 1831 sipx->sipx_type = ipx->ipx_type;
1834 sipx->sipx_zero = 0; 1832 sipx->sipx_zero = 0;
1833 msg->msg_namelen = sizeof(*sipx);
1835 } 1834 }
1836 rc = copied; 1835 rc = copied;
1837 1836
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 0f676908d15b..de7db23049f1 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1385,8 +1385,6 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
1385 1385
1386 IRDA_DEBUG(4, "%s()\n", __func__); 1386 IRDA_DEBUG(4, "%s()\n", __func__);
1387 1387
1388 msg->msg_namelen = 0;
1389
1390 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, 1388 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
1391 flags & MSG_DONTWAIT, &err); 1389 flags & MSG_DONTWAIT, &err);
1392 if (!skb) 1390 if (!skb)
@@ -1451,8 +1449,6 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
1451 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); 1449 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
1452 timeo = sock_rcvtimeo(sk, noblock); 1450 timeo = sock_rcvtimeo(sk, noblock);
1453 1451
1454 msg->msg_namelen = 0;
1455
1456 do { 1452 do {
1457 int chunk; 1453 int chunk;
1458 struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue); 1454 struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c
index c32971269280..a37b81fe0479 100644
--- a/net/irda/irnetlink.c
+++ b/net/irda/irnetlink.c
@@ -131,7 +131,7 @@ static const struct nla_policy irda_nl_policy[IRDA_NL_ATTR_MAX + 1] = {
131 [IRDA_NL_ATTR_MODE] = { .type = NLA_U32 }, 131 [IRDA_NL_ATTR_MODE] = { .type = NLA_U32 },
132}; 132};
133 133
134static struct genl_ops irda_nl_ops[] = { 134static const struct genl_ops irda_nl_ops[] = {
135 { 135 {
136 .cmd = IRDA_NL_CMD_SET_MODE, 136 .cmd = IRDA_NL_CMD_SET_MODE,
137 .doit = irda_nl_set_mode, 137 .doit = irda_nl_set_mode,
@@ -149,8 +149,7 @@ static struct genl_ops irda_nl_ops[] = {
149 149
150int irda_nl_register(void) 150int irda_nl_register(void)
151{ 151{
152 return genl_register_family_with_ops(&irda_nl_family, 152 return genl_register_family_with_ops(&irda_nl_family, irda_nl_ops);
153 irda_nl_ops, ARRAY_SIZE(irda_nl_ops));
154} 153}
155 154
156void irda_nl_unregister(void) 155void irda_nl_unregister(void)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 168aff5e60de..c4b7218058b6 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1324,8 +1324,6 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1324 int err = 0; 1324 int err = 0;
1325 u32 offset; 1325 u32 offset;
1326 1326
1327 msg->msg_namelen = 0;
1328
1329 if ((sk->sk_state == IUCV_DISCONN) && 1327 if ((sk->sk_state == IUCV_DISCONN) &&
1330 skb_queue_empty(&iucv->backlog_skb_q) && 1328 skb_queue_empty(&iucv->backlog_skb_q) &&
1331 skb_queue_empty(&sk->sk_receive_queue) && 1329 skb_queue_empty(&sk->sk_receive_queue) &&
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 911ef03bf8fb..545f047868ad 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3616,7 +3616,6 @@ static int pfkey_recvmsg(struct kiocb *kiocb,
3616 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT)) 3616 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
3617 goto out; 3617 goto out;
3618 3618
3619 msg->msg_namelen = 0;
3620 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); 3619 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3621 if (skb == NULL) 3620 if (skb == NULL)
3622 goto out; 3621 goto out;
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 571db8dd2292..da1a1cee1a08 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -518,9 +518,6 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
518 if (flags & MSG_OOB) 518 if (flags & MSG_OOB)
519 goto out; 519 goto out;
520 520
521 if (addr_len)
522 *addr_len = sizeof(*sin);
523
524 skb = skb_recv_datagram(sk, flags, noblock, &err); 521 skb = skb_recv_datagram(sk, flags, noblock, &err);
525 if (!skb) 522 if (!skb)
526 goto out; 523 goto out;
@@ -543,6 +540,7 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
543 sin->sin_addr.s_addr = ip_hdr(skb)->saddr; 540 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
544 sin->sin_port = 0; 541 sin->sin_port = 0;
545 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); 542 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
543 *addr_len = sizeof(*sin);
546 } 544 }
547 if (inet->cmsg_flags) 545 if (inet->cmsg_flags)
548 ip_cmsg_recv(msg, skb); 546 ip_cmsg_recv(msg, skb);
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index cfd65304be60..bb6e206ea70b 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -528,7 +528,6 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
528 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); 528 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
529 if (flowlabel == NULL) 529 if (flowlabel == NULL)
530 return -EINVAL; 530 return -EINVAL;
531 daddr = &flowlabel->dst;
532 } 531 }
533 } 532 }
534 533
@@ -665,7 +664,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
665 *addr_len = sizeof(*lsa); 664 *addr_len = sizeof(*lsa);
666 665
667 if (flags & MSG_ERRQUEUE) 666 if (flags & MSG_ERRQUEUE)
668 return ipv6_recv_error(sk, msg, len); 667 return ipv6_recv_error(sk, msg, len, addr_len);
669 668
670 skb = skb_recv_datagram(sk, flags, noblock, &err); 669 skb = skb_recv_datagram(sk, flags, noblock, &err);
671 if (!skb) 670 if (!skb)
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index be446d517bc9..4cfd722e9153 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -793,7 +793,7 @@ static struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = {
793 }, 793 },
794}; 794};
795 795
796static struct genl_ops l2tp_nl_ops[] = { 796static const struct genl_ops l2tp_nl_ops[] = {
797 { 797 {
798 .cmd = L2TP_CMD_NOOP, 798 .cmd = L2TP_CMD_NOOP,
799 .doit = l2tp_nl_cmd_noop, 799 .doit = l2tp_nl_cmd_noop,
@@ -887,13 +887,8 @@ EXPORT_SYMBOL_GPL(l2tp_nl_unregister_ops);
887 887
888static int l2tp_nl_init(void) 888static int l2tp_nl_init(void)
889{ 889{
890 int err;
891
892 pr_info("L2TP netlink interface\n"); 890 pr_info("L2TP netlink interface\n");
893 err = genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops, 891 return genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops);
894 ARRAY_SIZE(l2tp_nl_ops));
895
896 return err;
897} 892}
898 893
899static void l2tp_nl_cleanup(void) 894static void l2tp_nl_cleanup(void)
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index ffda81ef1a70..be5fadf34739 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -197,8 +197,6 @@ static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
197 if (sk->sk_state & PPPOX_BOUND) 197 if (sk->sk_state & PPPOX_BOUND)
198 goto end; 198 goto end;
199 199
200 msg->msg_namelen = 0;
201
202 err = 0; 200 err = 0;
203 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, 201 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
204 flags & MSG_DONTWAIT, &err); 202 flags & MSG_DONTWAIT, &err);
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 6cba486353e8..7b01b9f5846c 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -720,8 +720,6 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
720 int target; /* Read at least this many bytes */ 720 int target; /* Read at least this many bytes */
721 long timeo; 721 long timeo;
722 722
723 msg->msg_namelen = 0;
724
725 lock_sock(sk); 723 lock_sock(sk);
726 copied = -ENOTCONN; 724 copied = -ENOTCONN;
727 if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN)) 725 if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 95667b088c5b..364ce0c5962f 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1368,7 +1368,7 @@ static int sta_apply_parameters(struct ieee80211_local *local,
1368 changed |= 1368 changed |=
1369 ieee80211_mps_set_sta_local_pm(sta, 1369 ieee80211_mps_set_sta_local_pm(sta,
1370 params->local_pm); 1370 params->local_pm);
1371 ieee80211_bss_info_change_notify(sdata, changed); 1371 ieee80211_mbss_info_change_notify(sdata, changed);
1372#endif 1372#endif
1373 } 1373 }
1374 1374
@@ -2488,8 +2488,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
2488 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2488 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2489 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2489 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2490 2490
2491 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2491 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2492 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
2493 return -EOPNOTSUPP; 2492 return -EOPNOTSUPP;
2494 2493
2495 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) 2494 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
@@ -3120,9 +3119,17 @@ static int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
3120 params->chandef.chan->band) 3119 params->chandef.chan->band)
3121 return -EINVAL; 3120 return -EINVAL;
3122 3121
3122 ifmsh->chsw_init = true;
3123 if (!ifmsh->pre_value)
3124 ifmsh->pre_value = 1;
3125 else
3126 ifmsh->pre_value++;
3127
3123 err = ieee80211_mesh_csa_beacon(sdata, params, true); 3128 err = ieee80211_mesh_csa_beacon(sdata, params, true);
3124 if (err < 0) 3129 if (err < 0) {
3130 ifmsh->chsw_init = false;
3125 return err; 3131 return err;
3132 }
3126 break; 3133 break;
3127#endif 3134#endif
3128 default: 3135 default:
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 531be040b9ae..27a39de89679 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -823,6 +823,10 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
823 if (err) 823 if (err)
824 return false; 824 return false;
825 825
826 /* channel switch is not supported, disconnect */
827 if (!(sdata->local->hw.wiphy->flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH))
828 goto disconnect;
829
826 params.count = csa_ie.count; 830 params.count = csa_ie.count;
827 params.chandef = csa_ie.chandef; 831 params.chandef = csa_ie.chandef;
828 832
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 29dc505be125..4aea4e791113 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1228,6 +1228,7 @@ struct ieee80211_csa_ie {
1228 u8 mode; 1228 u8 mode;
1229 u8 count; 1229 u8 count;
1230 u8 ttl; 1230 u8 ttl;
1231 u16 pre_value;
1231}; 1232};
1232 1233
1233/* Parsed Information Elements */ 1234/* Parsed Information Elements */
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index ff101ea1d9ae..36c3a4cbcabf 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1325,7 +1325,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
1325 sdata->vif.bss_conf.bssid = NULL; 1325 sdata->vif.bss_conf.bssid = NULL;
1326 break; 1326 break;
1327 case NL80211_IFTYPE_AP_VLAN: 1327 case NL80211_IFTYPE_AP_VLAN:
1328 break;
1329 case NL80211_IFTYPE_P2P_DEVICE: 1328 case NL80211_IFTYPE_P2P_DEVICE:
1330 sdata->vif.bss_conf.bssid = sdata->vif.addr; 1329 sdata->vif.bss_conf.bssid = sdata->vif.addr;
1331 break; 1330 break;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 21d5d44444d0..7d1c3ac48ed9 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -940,6 +940,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
940 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n", 940 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
941 result); 941 result);
942 942
943 local->hw.conf.flags = IEEE80211_CONF_IDLE;
944
943 ieee80211_led_init(local); 945 ieee80211_led_init(local);
944 946
945 rtnl_lock(); 947 rtnl_lock();
@@ -1047,6 +1049,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1047 1049
1048 cancel_work_sync(&local->restart_work); 1050 cancel_work_sync(&local->restart_work);
1049 cancel_work_sync(&local->reconfig_filter); 1051 cancel_work_sync(&local->reconfig_filter);
1052 flush_work(&local->sched_scan_stopped_work);
1050 1053
1051 ieee80211_clear_tx_pending(local); 1054 ieee80211_clear_tx_pending(local);
1052 rate_control_deinitialize(local); 1055 rate_control_deinitialize(local);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 896fe3bd599e..ba105257d03f 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -943,14 +943,19 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
943 params.chandef.chan->center_freq); 943 params.chandef.chan->center_freq);
944 944
945 params.block_tx = csa_ie.mode & WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT; 945 params.block_tx = csa_ie.mode & WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT;
946 if (beacon) 946 if (beacon) {
947 ifmsh->chsw_ttl = csa_ie.ttl - 1; 947 ifmsh->chsw_ttl = csa_ie.ttl - 1;
948 else 948 if (ifmsh->pre_value >= csa_ie.pre_value)
949 ifmsh->chsw_ttl = 0; 949 return false;
950 ifmsh->pre_value = csa_ie.pre_value;
951 }
950 952
951 if (ifmsh->chsw_ttl > 0) 953 if (ifmsh->chsw_ttl < ifmsh->mshcfg.dot11MeshTTL) {
952 if (ieee80211_mesh_csa_beacon(sdata, &params, false) < 0) 954 if (ieee80211_mesh_csa_beacon(sdata, &params, false) < 0)
953 return false; 955 return false;
956 } else {
957 return false;
958 }
954 959
955 sdata->csa_radar_required = params.radar_required; 960 sdata->csa_radar_required = params.radar_required;
956 961
@@ -1163,7 +1168,6 @@ static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata,
1163 offset_ttl = (len < 42) ? 7 : 10; 1168 offset_ttl = (len < 42) ? 7 : 10;
1164 *(pos + offset_ttl) -= 1; 1169 *(pos + offset_ttl) -= 1;
1165 *(pos + offset_ttl + 1) &= ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR; 1170 *(pos + offset_ttl + 1) &= ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
1166 sdata->u.mesh.chsw_ttl = *(pos + offset_ttl);
1167 1171
1168 memcpy(mgmt_fwd, mgmt, len); 1172 memcpy(mgmt_fwd, mgmt, len);
1169 eth_broadcast_addr(mgmt_fwd->da); 1173 eth_broadcast_addr(mgmt_fwd->da);
@@ -1182,7 +1186,7 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
1182 u16 pre_value; 1186 u16 pre_value;
1183 bool fwd_csa = true; 1187 bool fwd_csa = true;
1184 size_t baselen; 1188 size_t baselen;
1185 u8 *pos, ttl; 1189 u8 *pos;
1186 1190
1187 if (mgmt->u.action.u.measurement.action_code != 1191 if (mgmt->u.action.u.measurement.action_code !=
1188 WLAN_ACTION_SPCT_CHL_SWITCH) 1192 WLAN_ACTION_SPCT_CHL_SWITCH)
@@ -1193,8 +1197,8 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
1193 u.action.u.chan_switch.variable); 1197 u.action.u.chan_switch.variable);
1194 ieee802_11_parse_elems(pos, len - baselen, false, &elems); 1198 ieee802_11_parse_elems(pos, len - baselen, false, &elems);
1195 1199
1196 ttl = elems.mesh_chansw_params_ie->mesh_ttl; 1200 ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl;
1197 if (!--ttl) 1201 if (!--ifmsh->chsw_ttl)
1198 fwd_csa = false; 1202 fwd_csa = false;
1199 1203
1200 pre_value = le16_to_cpu(elems.mesh_chansw_params_ie->mesh_pre_value); 1204 pre_value = le16_to_cpu(elems.mesh_chansw_params_ie->mesh_pre_value);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index d7504ab61a34..b3a3ce316656 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1910,6 +1910,8 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
1910 if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) 1910 if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL)
1911 already = true; 1911 already = true;
1912 1912
1913 ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL;
1914
1913 mutex_unlock(&sdata->local->mtx); 1915 mutex_unlock(&sdata->local->mtx);
1914 1916
1915 if (already) 1917 if (already)
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 5d60779a0c1b..4096ff6cc24f 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -226,7 +226,7 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
226 nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len); 226 nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
227 227
228 nsecs += minstrel_mcs_groups[group].duration[rate]; 228 nsecs += minstrel_mcs_groups[group].duration[rate];
229 tp = 1000000 * ((mr->probability * 1000) / nsecs); 229 tp = 1000000 * ((prob * 1000) / nsecs);
230 230
231 mr->cur_tp = MINSTREL_TRUNC(tp); 231 mr->cur_tp = MINSTREL_TRUNC(tp);
232} 232}
@@ -277,13 +277,15 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
277 if (!(mg->supported & BIT(i))) 277 if (!(mg->supported & BIT(i)))
278 continue; 278 continue;
279 279
280 index = MCS_GROUP_RATES * group + i;
281
280 /* initialize rates selections starting indexes */ 282 /* initialize rates selections starting indexes */
281 if (!mg_rates_valid) { 283 if (!mg_rates_valid) {
282 mg->max_tp_rate = mg->max_tp_rate2 = 284 mg->max_tp_rate = mg->max_tp_rate2 =
283 mg->max_prob_rate = i; 285 mg->max_prob_rate = i;
284 if (!mi_rates_valid) { 286 if (!mi_rates_valid) {
285 mi->max_tp_rate = mi->max_tp_rate2 = 287 mi->max_tp_rate = mi->max_tp_rate2 =
286 mi->max_prob_rate = i; 288 mi->max_prob_rate = index;
287 mi_rates_valid = true; 289 mi_rates_valid = true;
288 } 290 }
289 mg_rates_valid = true; 291 mg_rates_valid = true;
@@ -291,7 +293,6 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
291 293
292 mr = &mg->rates[i]; 294 mr = &mg->rates[i];
293 mr->retry_updated = false; 295 mr->retry_updated = false;
294 index = MCS_GROUP_RATES * group + i;
295 minstrel_calc_rate_ewma(mr); 296 minstrel_calc_rate_ewma(mr);
296 minstrel_ht_calc_tp(mi, group, i); 297 minstrel_ht_calc_tp(mi, group, i);
297 298
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index caecef870c0e..2b0debb0422b 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -911,7 +911,8 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
911 u16 sc; 911 u16 sc;
912 u8 tid, ack_policy; 912 u8 tid, ack_policy;
913 913
914 if (!ieee80211_is_data_qos(hdr->frame_control)) 914 if (!ieee80211_is_data_qos(hdr->frame_control) ||
915 is_multicast_ether_addr(hdr->addr1))
915 goto dont_reorder; 916 goto dont_reorder;
916 917
917 /* 918 /*
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 5ad66a83ef7f..bcc4833d7542 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -1088,6 +1088,6 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw)
1088 1088
1089 trace_api_sched_scan_stopped(local); 1089 trace_api_sched_scan_stopped(local);
1090 1090
1091 ieee80211_queue_work(&local->hw, &local->sched_scan_stopped_work); 1091 schedule_work(&local->sched_scan_stopped_work);
1092} 1092}
1093EXPORT_SYMBOL(ieee80211_sched_scan_stopped); 1093EXPORT_SYMBOL(ieee80211_sched_scan_stopped);
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index a40da20b32e0..6ab009070084 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -78,6 +78,8 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
78 if (elems->mesh_chansw_params_ie) { 78 if (elems->mesh_chansw_params_ie) {
79 csa_ie->ttl = elems->mesh_chansw_params_ie->mesh_ttl; 79 csa_ie->ttl = elems->mesh_chansw_params_ie->mesh_ttl;
80 csa_ie->mode = elems->mesh_chansw_params_ie->mesh_flags; 80 csa_ie->mode = elems->mesh_chansw_params_ie->mesh_flags;
81 csa_ie->pre_value = le16_to_cpu(
82 elems->mesh_chansw_params_ie->mesh_pre_value);
81 } 83 }
82 84
83 new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band); 85 new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 592a18171f95..9f9b9bd3fd44 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2278,17 +2278,15 @@ void ieee80211_dfs_radar_detected_work(struct work_struct *work)
2278{ 2278{
2279 struct ieee80211_local *local = 2279 struct ieee80211_local *local =
2280 container_of(work, struct ieee80211_local, radar_detected_work); 2280 container_of(work, struct ieee80211_local, radar_detected_work);
2281 struct cfg80211_chan_def chandef; 2281 struct cfg80211_chan_def chandef = local->hw.conf.chandef;
2282 2282
2283 ieee80211_dfs_cac_cancel(local); 2283 ieee80211_dfs_cac_cancel(local);
2284 2284
2285 if (local->use_chanctx) 2285 if (local->use_chanctx)
2286 /* currently not handled */ 2286 /* currently not handled */
2287 WARN_ON(1); 2287 WARN_ON(1);
2288 else { 2288 else
2289 chandef = local->hw.conf.chandef;
2290 cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL); 2289 cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL);
2291 }
2292} 2290}
2293 2291
2294void ieee80211_radar_detected(struct ieee80211_hw *hw) 2292void ieee80211_radar_detected(struct ieee80211_hw *hw)
@@ -2459,14 +2457,9 @@ int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
2459 WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT : 0x00; 2457 WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT : 0x00;
2460 put_unaligned_le16(WLAN_REASON_MESH_CHAN, pos); /* Reason Cd */ 2458 put_unaligned_le16(WLAN_REASON_MESH_CHAN, pos); /* Reason Cd */
2461 pos += 2; 2459 pos += 2;
2462 if (!ifmsh->pre_value)
2463 ifmsh->pre_value = 1;
2464 else
2465 ifmsh->pre_value++;
2466 pre_value = cpu_to_le16(ifmsh->pre_value); 2460 pre_value = cpu_to_le16(ifmsh->pre_value);
2467 memcpy(pos, &pre_value, 2); /* Precedence Value */ 2461 memcpy(pos, &pre_value, 2); /* Precedence Value */
2468 pos += 2; 2462 pos += 2;
2469 ifmsh->chsw_init = true;
2470 } 2463 }
2471 2464
2472 ieee80211_tx_skb(sdata, skb); 2465 ieee80211_tx_skb(sdata, skb);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 48acec17e27a..c3398cd99b94 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -909,7 +909,7 @@ config NETFILTER_XT_MATCH_CONNLABEL
909 connection simultaneously. 909 connection simultaneously.
910 910
911config NETFILTER_XT_MATCH_CONNLIMIT 911config NETFILTER_XT_MATCH_CONNLIMIT
912 tristate '"connlimit" match support"' 912 tristate '"connlimit" match support'
913 depends on NF_CONNTRACK 913 depends on NF_CONNTRACK
914 depends on NETFILTER_ADVANCED 914 depends on NETFILTER_ADVANCED
915 ---help--- 915 ---help---
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index 2bc2dec20b00..6226803fc490 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -59,7 +59,7 @@ hash_netnet4_data_equal(const struct hash_netnet4_elem *ip1,
59 u32 *multi) 59 u32 *multi)
60{ 60{
61 return ip1->ipcmp == ip2->ipcmp && 61 return ip1->ipcmp == ip2->ipcmp &&
62 ip2->ccmp == ip2->ccmp; 62 ip1->ccmp == ip2->ccmp;
63} 63}
64 64
65static inline int 65static inline int
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 1ded5c6d268c..35be035ee0ce 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3580,7 +3580,7 @@ out:
3580} 3580}
3581 3581
3582 3582
3583static struct genl_ops ip_vs_genl_ops[] __read_mostly = { 3583static const struct genl_ops ip_vs_genl_ops[] __read_mostly = {
3584 { 3584 {
3585 .cmd = IPVS_CMD_NEW_SERVICE, 3585 .cmd = IPVS_CMD_NEW_SERVICE,
3586 .flags = GENL_ADMIN_PERM, 3586 .flags = GENL_ADMIN_PERM,
@@ -3679,7 +3679,7 @@ static struct genl_ops ip_vs_genl_ops[] __read_mostly = {
3679static int __init ip_vs_genl_register(void) 3679static int __init ip_vs_genl_register(void)
3680{ 3680{
3681 return genl_register_family_with_ops(&ip_vs_genl_family, 3681 return genl_register_family_with_ops(&ip_vs_genl_family,
3682 ip_vs_genl_ops, ARRAY_SIZE(ip_vs_genl_ops)); 3682 ip_vs_genl_ops);
3683} 3683}
3684 3684
3685static void ip_vs_genl_unregister(void) 3685static void ip_vs_genl_unregister(void)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index e22d950c60b3..43549eb7a7be 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -764,9 +764,10 @@ void nf_conntrack_free(struct nf_conn *ct)
764 struct net *net = nf_ct_net(ct); 764 struct net *net = nf_ct_net(ct);
765 765
766 nf_ct_ext_destroy(ct); 766 nf_ct_ext_destroy(ct);
767 atomic_dec(&net->ct.count);
768 nf_ct_ext_free(ct); 767 nf_ct_ext_free(ct);
769 kmem_cache_free(net->ct.nf_conntrack_cachep, ct); 768 kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
769 smp_mb__before_atomic_dec();
770 atomic_dec(&net->ct.count);
770} 771}
771EXPORT_SYMBOL_GPL(nf_conntrack_free); 772EXPORT_SYMBOL_GPL(nf_conntrack_free);
772 773
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
index 5f9bfd060dea..17c1bcb182c6 100644
--- a/net/netfilter/nf_conntrack_seqadj.c
+++ b/net/netfilter/nf_conntrack_seqadj.c
@@ -41,8 +41,8 @@ int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
41 spin_lock_bh(&ct->lock); 41 spin_lock_bh(&ct->lock);
42 this_way = &seqadj->seq[dir]; 42 this_way = &seqadj->seq[dir];
43 if (this_way->offset_before == this_way->offset_after || 43 if (this_way->offset_before == this_way->offset_after ||
44 before(this_way->correction_pos, seq)) { 44 before(this_way->correction_pos, ntohl(seq))) {
45 this_way->correction_pos = seq; 45 this_way->correction_pos = ntohl(seq);
46 this_way->offset_before = this_way->offset_after; 46 this_way->offset_before = this_way->offset_after;
47 this_way->offset_after += off; 47 this_way->offset_after += off;
48 } 48 }
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index cdf4567ba9b3..9858e3e51a3a 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -151,9 +151,10 @@ void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
151 opts->tsecr = opts->tsval; 151 opts->tsecr = opts->tsval;
152 opts->tsval = tcp_time_stamp & ~0x3f; 152 opts->tsval = tcp_time_stamp & ~0x3f;
153 153
154 if (opts->options & XT_SYNPROXY_OPT_WSCALE) 154 if (opts->options & XT_SYNPROXY_OPT_WSCALE) {
155 opts->tsval |= info->wscale; 155 opts->tsval |= opts->wscale;
156 else 156 opts->wscale = info->wscale;
157 } else
157 opts->tsval |= 0xf; 158 opts->tsval |= 0xf;
158 159
159 if (opts->options & XT_SYNPROXY_OPT_SACK_PERM) 160 if (opts->options & XT_SYNPROXY_OPT_SACK_PERM)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index dcddc49c0e08..f93b7d06f4be 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1717,6 +1717,19 @@ nf_tables_delrule_one(struct nft_ctx *ctx, struct nft_rule *rule)
1717 return -ENOENT; 1717 return -ENOENT;
1718} 1718}
1719 1719
1720static int nf_table_delrule_by_chain(struct nft_ctx *ctx)
1721{
1722 struct nft_rule *rule;
1723 int err;
1724
1725 list_for_each_entry(rule, &ctx->chain->rules, list) {
1726 err = nf_tables_delrule_one(ctx, rule);
1727 if (err < 0)
1728 return err;
1729 }
1730 return 0;
1731}
1732
1720static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, 1733static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
1721 const struct nlmsghdr *nlh, 1734 const struct nlmsghdr *nlh,
1722 const struct nlattr * const nla[]) 1735 const struct nlattr * const nla[])
@@ -1725,8 +1738,8 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
1725 const struct nft_af_info *afi; 1738 const struct nft_af_info *afi;
1726 struct net *net = sock_net(skb->sk); 1739 struct net *net = sock_net(skb->sk);
1727 const struct nft_table *table; 1740 const struct nft_table *table;
1728 struct nft_chain *chain; 1741 struct nft_chain *chain = NULL;
1729 struct nft_rule *rule, *tmp; 1742 struct nft_rule *rule;
1730 int family = nfmsg->nfgen_family, err = 0; 1743 int family = nfmsg->nfgen_family, err = 0;
1731 struct nft_ctx ctx; 1744 struct nft_ctx ctx;
1732 1745
@@ -1738,22 +1751,29 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
1738 if (IS_ERR(table)) 1751 if (IS_ERR(table))
1739 return PTR_ERR(table); 1752 return PTR_ERR(table);
1740 1753
1741 chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]); 1754 if (nla[NFTA_RULE_CHAIN]) {
1742 if (IS_ERR(chain)) 1755 chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
1743 return PTR_ERR(chain); 1756 if (IS_ERR(chain))
1757 return PTR_ERR(chain);
1758 }
1744 1759
1745 nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); 1760 nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
1746 1761
1747 if (nla[NFTA_RULE_HANDLE]) { 1762 if (chain) {
1748 rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]); 1763 if (nla[NFTA_RULE_HANDLE]) {
1749 if (IS_ERR(rule)) 1764 rule = nf_tables_rule_lookup(chain,
1750 return PTR_ERR(rule); 1765 nla[NFTA_RULE_HANDLE]);
1766 if (IS_ERR(rule))
1767 return PTR_ERR(rule);
1751 1768
1752 err = nf_tables_delrule_one(&ctx, rule);
1753 } else {
1754 /* Remove all rules in this chain */
1755 list_for_each_entry_safe(rule, tmp, &chain->rules, list) {
1756 err = nf_tables_delrule_one(&ctx, rule); 1769 err = nf_tables_delrule_one(&ctx, rule);
1770 } else {
1771 err = nf_table_delrule_by_chain(&ctx);
1772 }
1773 } else {
1774 list_for_each_entry(chain, &table->chains, list) {
1775 ctx.chain = chain;
1776 err = nf_table_delrule_by_chain(&ctx);
1757 if (err < 0) 1777 if (err < 0)
1758 break; 1778 break;
1759 } 1779 }
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index a82667c64729..da0c1f4ada12 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -128,7 +128,7 @@ static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1]
128 [NFTA_RULE_COMPAT_FLAGS] = { .type = NLA_U32 }, 128 [NFTA_RULE_COMPAT_FLAGS] = { .type = NLA_U32 },
129}; 129};
130 130
131static u8 nft_parse_compat(const struct nlattr *attr, bool *inv) 131static int nft_parse_compat(const struct nlattr *attr, u8 *proto, bool *inv)
132{ 132{
133 struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1]; 133 struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1];
134 u32 flags; 134 u32 flags;
@@ -148,7 +148,8 @@ static u8 nft_parse_compat(const struct nlattr *attr, bool *inv)
148 if (flags & NFT_RULE_COMPAT_F_INV) 148 if (flags & NFT_RULE_COMPAT_F_INV)
149 *inv = true; 149 *inv = true;
150 150
151 return ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO])); 151 *proto = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO]));
152 return 0;
152} 153}
153 154
154static int 155static int
@@ -166,8 +167,11 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
166 167
167 target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info); 168 target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info);
168 169
169 if (ctx->nla[NFTA_RULE_COMPAT]) 170 if (ctx->nla[NFTA_RULE_COMPAT]) {
170 proto = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &inv); 171 ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv);
172 if (ret < 0)
173 goto err;
174 }
171 175
172 nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv); 176 nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
173 177
@@ -356,8 +360,11 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
356 360
357 match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info); 361 match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info);
358 362
359 if (ctx->nla[NFTA_RULE_COMPAT]) 363 if (ctx->nla[NFTA_RULE_COMPAT]) {
360 proto = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &inv); 364 ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv);
365 if (ret < 0)
366 goto err;
367 }
361 368
362 nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv); 369 nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
363 370
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 9ff035c71403..a3910fc2122b 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -325,21 +325,24 @@ static void htable_gc(unsigned long htlong)
325 add_timer(&ht->timer); 325 add_timer(&ht->timer);
326} 326}
327 327
328static void htable_destroy(struct xt_hashlimit_htable *hinfo) 328static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
329{ 329{
330 struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net); 330 struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net);
331 struct proc_dir_entry *parent; 331 struct proc_dir_entry *parent;
332 332
333 del_timer_sync(&hinfo->timer);
334
335 if (hinfo->family == NFPROTO_IPV4) 333 if (hinfo->family == NFPROTO_IPV4)
336 parent = hashlimit_net->ipt_hashlimit; 334 parent = hashlimit_net->ipt_hashlimit;
337 else 335 else
338 parent = hashlimit_net->ip6t_hashlimit; 336 parent = hashlimit_net->ip6t_hashlimit;
339 337
340 if(parent != NULL) 338 if (parent != NULL)
341 remove_proc_entry(hinfo->name, parent); 339 remove_proc_entry(hinfo->name, parent);
340}
342 341
342static void htable_destroy(struct xt_hashlimit_htable *hinfo)
343{
344 del_timer_sync(&hinfo->timer);
345 htable_remove_proc_entry(hinfo);
343 htable_selective_cleanup(hinfo, select_all); 346 htable_selective_cleanup(hinfo, select_all);
344 kfree(hinfo->name); 347 kfree(hinfo->name);
345 vfree(hinfo); 348 vfree(hinfo);
@@ -883,21 +886,15 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
883static void __net_exit hashlimit_proc_net_exit(struct net *net) 886static void __net_exit hashlimit_proc_net_exit(struct net *net)
884{ 887{
885 struct xt_hashlimit_htable *hinfo; 888 struct xt_hashlimit_htable *hinfo;
886 struct proc_dir_entry *pde;
887 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); 889 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
888 890
889 /* recent_net_exit() is called before recent_mt_destroy(). Make sure 891 /* hashlimit_net_exit() is called before hashlimit_mt_destroy().
890 * that the parent xt_recent proc entry is is empty before trying to 892 * Make sure that the parent ipt_hashlimit and ip6t_hashlimit proc
891 * remove it. 893 * entries is empty before trying to remove it.
892 */ 894 */
893 mutex_lock(&hashlimit_mutex); 895 mutex_lock(&hashlimit_mutex);
894 pde = hashlimit_net->ipt_hashlimit;
895 if (pde == NULL)
896 pde = hashlimit_net->ip6t_hashlimit;
897
898 hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) 896 hlist_for_each_entry(hinfo, &hashlimit_net->htables, node)
899 remove_proc_entry(hinfo->name, pde); 897 htable_remove_proc_entry(hinfo);
900
901 hashlimit_net->ipt_hashlimit = NULL; 898 hashlimit_net->ipt_hashlimit = NULL;
902 hashlimit_net->ip6t_hashlimit = NULL; 899 hashlimit_net->ip6t_hashlimit = NULL;
903 mutex_unlock(&hashlimit_mutex); 900 mutex_unlock(&hashlimit_mutex);
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index a1100640495d..69345cebe3a3 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -737,7 +737,7 @@ static int netlbl_cipsov4_remove(struct sk_buff *skb, struct genl_info *info)
737 * NetLabel Generic NETLINK Command Definitions 737 * NetLabel Generic NETLINK Command Definitions
738 */ 738 */
739 739
740static struct genl_ops netlbl_cipsov4_ops[] = { 740static const struct genl_ops netlbl_cipsov4_ops[] = {
741 { 741 {
742 .cmd = NLBL_CIPSOV4_C_ADD, 742 .cmd = NLBL_CIPSOV4_C_ADD,
743 .flags = GENL_ADMIN_PERM, 743 .flags = GENL_ADMIN_PERM,
@@ -783,5 +783,5 @@ static struct genl_ops netlbl_cipsov4_ops[] = {
783int __init netlbl_cipsov4_genl_init(void) 783int __init netlbl_cipsov4_genl_init(void)
784{ 784{
785 return genl_register_family_with_ops(&netlbl_cipsov4_gnl_family, 785 return genl_register_family_with_ops(&netlbl_cipsov4_gnl_family,
786 netlbl_cipsov4_ops, ARRAY_SIZE(netlbl_cipsov4_ops)); 786 netlbl_cipsov4_ops);
787} 787}
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index dd1c37d7acbc..8ef83ee97c6a 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -705,7 +705,7 @@ version_failure:
705 * NetLabel Generic NETLINK Command Definitions 705 * NetLabel Generic NETLINK Command Definitions
706 */ 706 */
707 707
708static struct genl_ops netlbl_mgmt_genl_ops[] = { 708static const struct genl_ops netlbl_mgmt_genl_ops[] = {
709 { 709 {
710 .cmd = NLBL_MGMT_C_ADD, 710 .cmd = NLBL_MGMT_C_ADD,
711 .flags = GENL_ADMIN_PERM, 711 .flags = GENL_ADMIN_PERM,
@@ -779,5 +779,5 @@ static struct genl_ops netlbl_mgmt_genl_ops[] = {
779int __init netlbl_mgmt_genl_init(void) 779int __init netlbl_mgmt_genl_init(void)
780{ 780{
781 return genl_register_family_with_ops(&netlbl_mgmt_gnl_family, 781 return genl_register_family_with_ops(&netlbl_mgmt_gnl_family,
782 netlbl_mgmt_genl_ops, ARRAY_SIZE(netlbl_mgmt_genl_ops)); 782 netlbl_mgmt_genl_ops);
783} 783}
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 8f0897407a2c..43817d73ccf9 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1323,7 +1323,7 @@ unlabel_staticlistdef_return:
1323 * NetLabel Generic NETLINK Command Definitions 1323 * NetLabel Generic NETLINK Command Definitions
1324 */ 1324 */
1325 1325
1326static struct genl_ops netlbl_unlabel_genl_ops[] = { 1326static const struct genl_ops netlbl_unlabel_genl_ops[] = {
1327 { 1327 {
1328 .cmd = NLBL_UNLABEL_C_STATICADD, 1328 .cmd = NLBL_UNLABEL_C_STATICADD,
1329 .flags = GENL_ADMIN_PERM, 1329 .flags = GENL_ADMIN_PERM,
@@ -1397,7 +1397,7 @@ static struct genl_ops netlbl_unlabel_genl_ops[] = {
1397int __init netlbl_unlabel_genl_init(void) 1397int __init netlbl_unlabel_genl_init(void)
1398{ 1398{
1399 return genl_register_family_with_ops(&netlbl_unlabel_gnl_family, 1399 return genl_register_family_with_ops(&netlbl_unlabel_gnl_family,
1400 netlbl_unlabel_genl_ops, ARRAY_SIZE(netlbl_unlabel_genl_ops)); 1400 netlbl_unlabel_genl_ops);
1401} 1401}
1402 1402
1403/* 1403/*
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 8df7f64c6db3..bca50b95c182 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2017,7 +2017,7 @@ out:
2017 * netlink_set_err - report error to broadcast listeners 2017 * netlink_set_err - report error to broadcast listeners
2018 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create() 2018 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
2019 * @portid: the PORTID of a process that we want to skip (if any) 2019 * @portid: the PORTID of a process that we want to skip (if any)
2020 * @groups: the broadcast group that will notice the error 2020 * @group: the broadcast group that will notice the error
2021 * @code: error code, must be negative (as usual in kernelspace) 2021 * @code: error code, must be negative (as usual in kernelspace)
2022 * 2022 *
2023 * This function returns the number of broadcast listeners that have set the 2023 * This function returns the number of broadcast listeners that have set the
@@ -2335,8 +2335,6 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
2335 } 2335 }
2336#endif 2336#endif
2337 2337
2338 msg->msg_namelen = 0;
2339
2340 copied = data_skb->len; 2338 copied = data_skb->len;
2341 if (len < copied) { 2339 if (len < copied) {
2342 msg->msg_flags |= MSG_TRUNC; 2340 msg->msg_flags |= MSG_TRUNC;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 0c741cec4d0d..713671ae45af 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -65,12 +65,27 @@ static struct list_head family_ht[GENL_FAM_TAB_SIZE];
65 * To avoid an allocation at boot of just one unsigned long, 65 * To avoid an allocation at boot of just one unsigned long,
66 * declare it global instead. 66 * declare it global instead.
67 * Bit 0 is marked as already used since group 0 is invalid. 67 * Bit 0 is marked as already used since group 0 is invalid.
68 * Bit 1 is marked as already used since the drop-monitor code
69 * abuses the API and thinks it can statically use group 1.
70 * That group will typically conflict with other groups that
71 * any proper users use.
72 * Bit 16 is marked as used since it's used for generic netlink
73 * and the code no longer marks pre-reserved IDs as used.
74 * Bit 17 is marked as already used since the VFS quota code
75 * also abused this API and relied on family == group ID, we
76 * cater to that by giving it a static family and group ID.
77 * Bit 18 is marked as already used since the PMCRAID driver
78 * did the same thing as the VFS quota code (maybe copied?)
68 */ 79 */
69static unsigned long mc_group_start = 0x1; 80static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
81 BIT(GENL_ID_VFS_DQUOT) |
82 BIT(GENL_ID_PMCRAID);
70static unsigned long *mc_groups = &mc_group_start; 83static unsigned long *mc_groups = &mc_group_start;
71static unsigned long mc_groups_longs = 1; 84static unsigned long mc_groups_longs = 1;
72 85
73static int genl_ctrl_event(int event, void *data); 86static int genl_ctrl_event(int event, struct genl_family *family,
87 const struct genl_multicast_group *grp,
88 int grp_id);
74 89
75static inline unsigned int genl_family_hash(unsigned int id) 90static inline unsigned int genl_family_hash(unsigned int id)
76{ 91{
@@ -106,13 +121,13 @@ static struct genl_family *genl_family_find_byname(char *name)
106 return NULL; 121 return NULL;
107} 122}
108 123
109static struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family) 124static const struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
110{ 125{
111 struct genl_ops *ops; 126 int i;
112 127
113 list_for_each_entry(ops, &family->ops_list, ops_list) 128 for (i = 0; i < family->n_ops; i++)
114 if (ops->cmd == cmd) 129 if (family->ops[i].cmd == cmd)
115 return ops; 130 return &family->ops[i];
116 131
117 return NULL; 132 return NULL;
118} 133}
@@ -126,7 +141,9 @@ static u16 genl_generate_id(void)
126 int i; 141 int i;
127 142
128 for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) { 143 for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) {
129 if (!genl_family_find_byid(id_gen_idx)) 144 if (id_gen_idx != GENL_ID_VFS_DQUOT &&
145 id_gen_idx != GENL_ID_PMCRAID &&
146 !genl_family_find_byid(id_gen_idx))
130 return id_gen_idx; 147 return id_gen_idx;
131 if (++id_gen_idx > GENL_MAX_ID) 148 if (++id_gen_idx > GENL_MAX_ID)
132 id_gen_idx = GENL_MIN_ID; 149 id_gen_idx = GENL_MIN_ID;
@@ -135,62 +152,113 @@ static u16 genl_generate_id(void)
135 return 0; 152 return 0;
136} 153}
137 154
138static struct genl_multicast_group notify_grp; 155static int genl_allocate_reserve_groups(int n_groups, int *first_id)
139
140/**
141 * genl_register_mc_group - register a multicast group
142 *
143 * Registers the specified multicast group and notifies userspace
144 * about the new group.
145 *
146 * Returns 0 on success or a negative error code.
147 *
148 * @family: The generic netlink family the group shall be registered for.
149 * @grp: The group to register, must have a name.
150 */
151int genl_register_mc_group(struct genl_family *family,
152 struct genl_multicast_group *grp)
153{ 156{
154 int id;
155 unsigned long *new_groups; 157 unsigned long *new_groups;
156 int err = 0; 158 int start = 0;
159 int i;
160 int id;
161 bool fits;
162
163 do {
164 if (start == 0)
165 id = find_first_zero_bit(mc_groups,
166 mc_groups_longs *
167 BITS_PER_LONG);
168 else
169 id = find_next_zero_bit(mc_groups,
170 mc_groups_longs * BITS_PER_LONG,
171 start);
172
173 fits = true;
174 for (i = id;
175 i < min_t(int, id + n_groups,
176 mc_groups_longs * BITS_PER_LONG);
177 i++) {
178 if (test_bit(i, mc_groups)) {
179 start = i;
180 fits = false;
181 break;
182 }
183 }
157 184
158 BUG_ON(grp->name[0] == '\0'); 185 if (id >= mc_groups_longs * BITS_PER_LONG) {
159 BUG_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL); 186 unsigned long new_longs = mc_groups_longs +
187 BITS_TO_LONGS(n_groups);
188 size_t nlen = new_longs * sizeof(unsigned long);
189
190 if (mc_groups == &mc_group_start) {
191 new_groups = kzalloc(nlen, GFP_KERNEL);
192 if (!new_groups)
193 return -ENOMEM;
194 mc_groups = new_groups;
195 *mc_groups = mc_group_start;
196 } else {
197 new_groups = krealloc(mc_groups, nlen,
198 GFP_KERNEL);
199 if (!new_groups)
200 return -ENOMEM;
201 mc_groups = new_groups;
202 for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
203 mc_groups[mc_groups_longs + i] = 0;
204 }
205 mc_groups_longs = new_longs;
206 }
207 } while (!fits);
160 208
161 genl_lock_all(); 209 for (i = id; i < id + n_groups; i++)
210 set_bit(i, mc_groups);
211 *first_id = id;
212 return 0;
213}
162 214
163 /* special-case our own group */ 215static struct genl_family genl_ctrl;
164 if (grp == &notify_grp)
165 id = GENL_ID_CTRL;
166 else
167 id = find_first_zero_bit(mc_groups,
168 mc_groups_longs * BITS_PER_LONG);
169 216
217static int genl_validate_assign_mc_groups(struct genl_family *family)
218{
219 int first_id;
220 int n_groups = family->n_mcgrps;
221 int err = 0, i;
222 bool groups_allocated = false;
170 223
171 if (id >= mc_groups_longs * BITS_PER_LONG) { 224 if (!n_groups)
172 size_t nlen = (mc_groups_longs + 1) * sizeof(unsigned long); 225 return 0;
173 226
174 if (mc_groups == &mc_group_start) { 227 for (i = 0; i < n_groups; i++) {
175 new_groups = kzalloc(nlen, GFP_KERNEL); 228 const struct genl_multicast_group *grp = &family->mcgrps[i];
176 if (!new_groups) { 229
177 err = -ENOMEM; 230 if (WARN_ON(grp->name[0] == '\0'))
178 goto out; 231 return -EINVAL;
179 } 232 if (WARN_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL))
180 mc_groups = new_groups; 233 return -EINVAL;
181 *mc_groups = mc_group_start; 234 }
182 } else { 235
183 new_groups = krealloc(mc_groups, nlen, GFP_KERNEL); 236 /* special-case our own group and hacks */
184 if (!new_groups) { 237 if (family == &genl_ctrl) {
185 err = -ENOMEM; 238 first_id = GENL_ID_CTRL;
186 goto out; 239 BUG_ON(n_groups != 1);
187 } 240 } else if (strcmp(family->name, "NET_DM") == 0) {
188 mc_groups = new_groups; 241 first_id = 1;
189 mc_groups[mc_groups_longs] = 0; 242 BUG_ON(n_groups != 1);
190 } 243 } else if (family->id == GENL_ID_VFS_DQUOT) {
191 mc_groups_longs++; 244 first_id = GENL_ID_VFS_DQUOT;
245 BUG_ON(n_groups != 1);
246 } else if (family->id == GENL_ID_PMCRAID) {
247 first_id = GENL_ID_PMCRAID;
248 BUG_ON(n_groups != 1);
249 } else {
250 groups_allocated = true;
251 err = genl_allocate_reserve_groups(n_groups, &first_id);
252 if (err)
253 return err;
192 } 254 }
193 255
256 family->mcgrp_offset = first_id;
257
258 /* if still initializing, can't and don't need to to realloc bitmaps */
259 if (!init_net.genl_sock)
260 return 0;
261
194 if (family->netnsok) { 262 if (family->netnsok) {
195 struct net *net; 263 struct net *net;
196 264
@@ -206,9 +274,7 @@ int genl_register_mc_group(struct genl_family *family,
206 * number of _possible_ groups has been 274 * number of _possible_ groups has been
207 * increased on some sockets which is ok. 275 * increased on some sockets which is ok.
208 */ 276 */
209 rcu_read_unlock(); 277 break;
210 netlink_table_ungrab();
211 goto out;
212 } 278 }
213 } 279 }
214 rcu_read_unlock(); 280 rcu_read_unlock();
@@ -216,152 +282,67 @@ int genl_register_mc_group(struct genl_family *family,
216 } else { 282 } else {
217 err = netlink_change_ngroups(init_net.genl_sock, 283 err = netlink_change_ngroups(init_net.genl_sock,
218 mc_groups_longs * BITS_PER_LONG); 284 mc_groups_longs * BITS_PER_LONG);
219 if (err)
220 goto out;
221 } 285 }
222 286
223 grp->id = id; 287 if (groups_allocated && err) {
224 set_bit(id, mc_groups); 288 for (i = 0; i < family->n_mcgrps; i++)
225 list_add_tail(&grp->list, &family->mcast_groups); 289 clear_bit(family->mcgrp_offset + i, mc_groups);
226 grp->family = family; 290 }
227 291
228 genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, grp);
229 out:
230 genl_unlock_all();
231 return err; 292 return err;
232} 293}
233EXPORT_SYMBOL(genl_register_mc_group);
234 294
235static void __genl_unregister_mc_group(struct genl_family *family, 295static void genl_unregister_mc_groups(struct genl_family *family)
236 struct genl_multicast_group *grp)
237{ 296{
238 struct net *net; 297 struct net *net;
239 BUG_ON(grp->family != family); 298 int i;
240 299
241 netlink_table_grab(); 300 netlink_table_grab();
242 rcu_read_lock(); 301 rcu_read_lock();
243 for_each_net_rcu(net) 302 for_each_net_rcu(net) {
244 __netlink_clear_multicast_users(net->genl_sock, grp->id); 303 for (i = 0; i < family->n_mcgrps; i++)
304 __netlink_clear_multicast_users(
305 net->genl_sock, family->mcgrp_offset + i);
306 }
245 rcu_read_unlock(); 307 rcu_read_unlock();
246 netlink_table_ungrab(); 308 netlink_table_ungrab();
247 309
248 clear_bit(grp->id, mc_groups); 310 for (i = 0; i < family->n_mcgrps; i++) {
249 list_del(&grp->list); 311 int grp_id = family->mcgrp_offset + i;
250 genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, grp);
251 grp->id = 0;
252 grp->family = NULL;
253}
254 312
255/** 313 if (grp_id != 1)
256 * genl_unregister_mc_group - unregister a multicast group 314 clear_bit(grp_id, mc_groups);
257 * 315 genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
258 * Unregisters the specified multicast group and notifies userspace 316 &family->mcgrps[i], grp_id);
259 * about it. All current listeners on the group are removed. 317 }
260 *
261 * Note: It is not necessary to unregister all multicast groups before
262 * unregistering the family, unregistering the family will cause
263 * all assigned multicast groups to be unregistered automatically.
264 *
265 * @family: Generic netlink family the group belongs to.
266 * @grp: The group to unregister, must have been registered successfully
267 * previously.
268 */
269void genl_unregister_mc_group(struct genl_family *family,
270 struct genl_multicast_group *grp)
271{
272 genl_lock_all();
273 __genl_unregister_mc_group(family, grp);
274 genl_unlock_all();
275} 318}
276EXPORT_SYMBOL(genl_unregister_mc_group);
277 319
278static void genl_unregister_mc_groups(struct genl_family *family) 320static int genl_validate_ops(struct genl_family *family)
279{ 321{
280 struct genl_multicast_group *grp, *tmp; 322 const struct genl_ops *ops = family->ops;
323 unsigned int n_ops = family->n_ops;
324 int i, j;
281 325
282 list_for_each_entry_safe(grp, tmp, &family->mcast_groups, list) 326 if (WARN_ON(n_ops && !ops))
283 __genl_unregister_mc_group(family, grp); 327 return -EINVAL;
284}
285
286/**
287 * genl_register_ops - register generic netlink operations
288 * @family: generic netlink family
289 * @ops: operations to be registered
290 *
291 * Registers the specified operations and assigns them to the specified
292 * family. Either a doit or dumpit callback must be specified or the
293 * operation will fail. Only one operation structure per command
294 * identifier may be registered.
295 *
296 * See include/net/genetlink.h for more documenation on the operations
297 * structure.
298 *
299 * Returns 0 on success or a negative error code.
300 */
301int genl_register_ops(struct genl_family *family, struct genl_ops *ops)
302{
303 int err = -EINVAL;
304 328
305 if (ops->dumpit == NULL && ops->doit == NULL) 329 if (!n_ops)
306 goto errout; 330 return 0;
307 331
308 if (genl_get_cmd(ops->cmd, family)) { 332 for (i = 0; i < n_ops; i++) {
309 err = -EEXIST; 333 if (ops[i].dumpit == NULL && ops[i].doit == NULL)
310 goto errout; 334 return -EINVAL;
335 for (j = i + 1; j < n_ops; j++)
336 if (ops[i].cmd == ops[j].cmd)
337 return -EINVAL;
311 } 338 }
312 339
313 if (ops->dumpit) 340 /* family is not registered yet, so no locking needed */
314 ops->flags |= GENL_CMD_CAP_DUMP; 341 family->ops = ops;
315 if (ops->doit) 342 family->n_ops = n_ops;
316 ops->flags |= GENL_CMD_CAP_DO;
317 if (ops->policy)
318 ops->flags |= GENL_CMD_CAP_HASPOL;
319 343
320 genl_lock_all(); 344 return 0;
321 list_add_tail(&ops->ops_list, &family->ops_list);
322 genl_unlock_all();
323
324 genl_ctrl_event(CTRL_CMD_NEWOPS, ops);
325 err = 0;
326errout:
327 return err;
328}
329EXPORT_SYMBOL(genl_register_ops);
330
331/**
332 * genl_unregister_ops - unregister generic netlink operations
333 * @family: generic netlink family
334 * @ops: operations to be unregistered
335 *
336 * Unregisters the specified operations and unassigns them from the
337 * specified family. The operation blocks until the current message
338 * processing has finished and doesn't start again until the
339 * unregister process has finished.
340 *
341 * Note: It is not necessary to unregister all operations before
342 * unregistering the family, unregistering the family will cause
343 * all assigned operations to be unregistered automatically.
344 *
345 * Returns 0 on success or a negative error code.
346 */
347int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
348{
349 struct genl_ops *rc;
350
351 genl_lock_all();
352 list_for_each_entry(rc, &family->ops_list, ops_list) {
353 if (rc == ops) {
354 list_del(&ops->ops_list);
355 genl_unlock_all();
356 genl_ctrl_event(CTRL_CMD_DELOPS, ops);
357 return 0;
358 }
359 }
360 genl_unlock_all();
361
362 return -ENOENT;
363} 345}
364EXPORT_SYMBOL(genl_unregister_ops);
365 346
366/** 347/**
367 * __genl_register_family - register a generic netlink family 348 * __genl_register_family - register a generic netlink family
@@ -372,11 +353,14 @@ EXPORT_SYMBOL(genl_unregister_ops);
372 * The family id may equal GENL_ID_GENERATE causing an unique id to 353 * The family id may equal GENL_ID_GENERATE causing an unique id to
373 * be automatically generated and assigned. 354 * be automatically generated and assigned.
374 * 355 *
356 * The family's ops array must already be assigned, you can use the
357 * genl_register_family_with_ops() helper function.
358 *
375 * Return 0 on success or a negative error code. 359 * Return 0 on success or a negative error code.
376 */ 360 */
377int __genl_register_family(struct genl_family *family) 361int __genl_register_family(struct genl_family *family)
378{ 362{
379 int err = -EINVAL; 363 int err = -EINVAL, i;
380 364
381 if (family->id && family->id < GENL_MIN_ID) 365 if (family->id && family->id < GENL_MIN_ID)
382 goto errout; 366 goto errout;
@@ -384,8 +368,9 @@ int __genl_register_family(struct genl_family *family)
384 if (family->id > GENL_MAX_ID) 368 if (family->id > GENL_MAX_ID)
385 goto errout; 369 goto errout;
386 370
387 INIT_LIST_HEAD(&family->ops_list); 371 err = genl_validate_ops(family);
388 INIT_LIST_HEAD(&family->mcast_groups); 372 if (err)
373 return err;
389 374
390 genl_lock_all(); 375 genl_lock_all();
391 376
@@ -418,10 +403,18 @@ int __genl_register_family(struct genl_family *family)
418 } else 403 } else
419 family->attrbuf = NULL; 404 family->attrbuf = NULL;
420 405
406 err = genl_validate_assign_mc_groups(family);
407 if (err)
408 goto errout_locked;
409
421 list_add_tail(&family->family_list, genl_family_chain(family->id)); 410 list_add_tail(&family->family_list, genl_family_chain(family->id));
422 genl_unlock_all(); 411 genl_unlock_all();
423 412
424 genl_ctrl_event(CTRL_CMD_NEWFAMILY, family); 413 /* send all events */
414 genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
415 for (i = 0; i < family->n_mcgrps; i++)
416 genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
417 &family->mcgrps[i], family->mcgrp_offset + i);
425 418
426 return 0; 419 return 0;
427 420
@@ -433,52 +426,6 @@ errout:
433EXPORT_SYMBOL(__genl_register_family); 426EXPORT_SYMBOL(__genl_register_family);
434 427
435/** 428/**
436 * __genl_register_family_with_ops - register a generic netlink family
437 * @family: generic netlink family
438 * @ops: operations to be registered
439 * @n_ops: number of elements to register
440 *
441 * Registers the specified family and operations from the specified table.
442 * Only one family may be registered with the same family name or identifier.
443 *
444 * The family id may equal GENL_ID_GENERATE causing an unique id to
445 * be automatically generated and assigned.
446 *
447 * Either a doit or dumpit callback must be specified for every registered
448 * operation or the function will fail. Only one operation structure per
449 * command identifier may be registered.
450 *
451 * See include/net/genetlink.h for more documenation on the operations
452 * structure.
453 *
454 * This is equivalent to calling genl_register_family() followed by
455 * genl_register_ops() for every operation entry in the table taking
456 * care to unregister the family on error path.
457 *
458 * Return 0 on success or a negative error code.
459 */
460int __genl_register_family_with_ops(struct genl_family *family,
461 struct genl_ops *ops, size_t n_ops)
462{
463 int err, i;
464
465 err = __genl_register_family(family);
466 if (err)
467 return err;
468
469 for (i = 0; i < n_ops; ++i, ++ops) {
470 err = genl_register_ops(family, ops);
471 if (err)
472 goto err_out;
473 }
474 return 0;
475err_out:
476 genl_unregister_family(family);
477 return err;
478}
479EXPORT_SYMBOL(__genl_register_family_with_ops);
480
481/**
482 * genl_unregister_family - unregister generic netlink family 429 * genl_unregister_family - unregister generic netlink family
483 * @family: generic netlink family 430 * @family: generic netlink family
484 * 431 *
@@ -499,11 +446,11 @@ int genl_unregister_family(struct genl_family *family)
499 continue; 446 continue;
500 447
501 list_del(&rc->family_list); 448 list_del(&rc->family_list);
502 INIT_LIST_HEAD(&family->ops_list); 449 family->n_ops = 0;
503 genl_unlock_all(); 450 genl_unlock_all();
504 451
505 kfree(family->attrbuf); 452 kfree(family->attrbuf);
506 genl_ctrl_event(CTRL_CMD_DELFAMILY, family); 453 genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
507 return 0; 454 return 0;
508 } 455 }
509 456
@@ -546,7 +493,8 @@ EXPORT_SYMBOL(genlmsg_put);
546 493
547static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb) 494static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
548{ 495{
549 struct genl_ops *ops = cb->data; 496 /* our ops are always const - netlink API doesn't propagate that */
497 const struct genl_ops *ops = cb->data;
550 int rc; 498 int rc;
551 499
552 genl_lock(); 500 genl_lock();
@@ -557,7 +505,8 @@ static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
557 505
558static int genl_lock_done(struct netlink_callback *cb) 506static int genl_lock_done(struct netlink_callback *cb)
559{ 507{
560 struct genl_ops *ops = cb->data; 508 /* our ops are always const - netlink API doesn't propagate that */
509 const struct genl_ops *ops = cb->data;
561 int rc = 0; 510 int rc = 0;
562 511
563 if (ops->done) { 512 if (ops->done) {
@@ -572,7 +521,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
572 struct sk_buff *skb, 521 struct sk_buff *skb,
573 struct nlmsghdr *nlh) 522 struct nlmsghdr *nlh)
574{ 523{
575 struct genl_ops *ops; 524 const struct genl_ops *ops;
576 struct net *net = sock_net(skb->sk); 525 struct net *net = sock_net(skb->sk);
577 struct genl_info info; 526 struct genl_info info;
578 struct genlmsghdr *hdr = nlmsg_data(nlh); 527 struct genlmsghdr *hdr = nlmsg_data(nlh);
@@ -604,7 +553,8 @@ static int genl_family_rcv_msg(struct genl_family *family,
604 if (!family->parallel_ops) { 553 if (!family->parallel_ops) {
605 struct netlink_dump_control c = { 554 struct netlink_dump_control c = {
606 .module = family->module, 555 .module = family->module,
607 .data = ops, 556 /* we have const, but the netlink API doesn't */
557 .data = (void *)ops,
608 .dump = genl_lock_dumpit, 558 .dump = genl_lock_dumpit,
609 .done = genl_lock_done, 559 .done = genl_lock_done,
610 }; 560 };
@@ -726,24 +676,32 @@ static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
726 nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr)) 676 nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
727 goto nla_put_failure; 677 goto nla_put_failure;
728 678
729 if (!list_empty(&family->ops_list)) { 679 if (family->n_ops) {
730 struct nlattr *nla_ops; 680 struct nlattr *nla_ops;
731 struct genl_ops *ops; 681 int i;
732 int idx = 1;
733 682
734 nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS); 683 nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS);
735 if (nla_ops == NULL) 684 if (nla_ops == NULL)
736 goto nla_put_failure; 685 goto nla_put_failure;
737 686
738 list_for_each_entry(ops, &family->ops_list, ops_list) { 687 for (i = 0; i < family->n_ops; i++) {
739 struct nlattr *nest; 688 struct nlattr *nest;
689 const struct genl_ops *ops = &family->ops[i];
690 u32 op_flags = ops->flags;
740 691
741 nest = nla_nest_start(skb, idx++); 692 if (ops->dumpit)
693 op_flags |= GENL_CMD_CAP_DUMP;
694 if (ops->doit)
695 op_flags |= GENL_CMD_CAP_DO;
696 if (ops->policy)
697 op_flags |= GENL_CMD_CAP_HASPOL;
698
699 nest = nla_nest_start(skb, i + 1);
742 if (nest == NULL) 700 if (nest == NULL)
743 goto nla_put_failure; 701 goto nla_put_failure;
744 702
745 if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) || 703 if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
746 nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, ops->flags)) 704 nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
747 goto nla_put_failure; 705 goto nla_put_failure;
748 706
749 nla_nest_end(skb, nest); 707 nla_nest_end(skb, nest);
@@ -752,23 +710,26 @@ static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
752 nla_nest_end(skb, nla_ops); 710 nla_nest_end(skb, nla_ops);
753 } 711 }
754 712
755 if (!list_empty(&family->mcast_groups)) { 713 if (family->n_mcgrps) {
756 struct genl_multicast_group *grp;
757 struct nlattr *nla_grps; 714 struct nlattr *nla_grps;
758 int idx = 1; 715 int i;
759 716
760 nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS); 717 nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
761 if (nla_grps == NULL) 718 if (nla_grps == NULL)
762 goto nla_put_failure; 719 goto nla_put_failure;
763 720
764 list_for_each_entry(grp, &family->mcast_groups, list) { 721 for (i = 0; i < family->n_mcgrps; i++) {
765 struct nlattr *nest; 722 struct nlattr *nest;
723 const struct genl_multicast_group *grp;
724
725 grp = &family->mcgrps[i];
766 726
767 nest = nla_nest_start(skb, idx++); 727 nest = nla_nest_start(skb, i + 1);
768 if (nest == NULL) 728 if (nest == NULL)
769 goto nla_put_failure; 729 goto nla_put_failure;
770 730
771 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) || 731 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
732 family->mcgrp_offset + i) ||
772 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME, 733 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
773 grp->name)) 734 grp->name))
774 goto nla_put_failure; 735 goto nla_put_failure;
@@ -785,9 +746,10 @@ nla_put_failure:
785 return -EMSGSIZE; 746 return -EMSGSIZE;
786} 747}
787 748
788static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 portid, 749static int ctrl_fill_mcgrp_info(struct genl_family *family,
789 u32 seq, u32 flags, struct sk_buff *skb, 750 const struct genl_multicast_group *grp,
790 u8 cmd) 751 int grp_id, u32 portid, u32 seq, u32 flags,
752 struct sk_buff *skb, u8 cmd)
791{ 753{
792 void *hdr; 754 void *hdr;
793 struct nlattr *nla_grps; 755 struct nlattr *nla_grps;
@@ -797,8 +759,8 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 portid,
797 if (hdr == NULL) 759 if (hdr == NULL)
798 return -1; 760 return -1;
799 761
800 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name) || 762 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
801 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id)) 763 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
802 goto nla_put_failure; 764 goto nla_put_failure;
803 765
804 nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS); 766 nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
@@ -809,7 +771,7 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 portid,
809 if (nest == NULL) 771 if (nest == NULL)
810 goto nla_put_failure; 772 goto nla_put_failure;
811 773
812 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) || 774 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
813 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME, 775 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
814 grp->name)) 776 grp->name))
815 goto nla_put_failure; 777 goto nla_put_failure;
@@ -875,8 +837,10 @@ static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
875 return skb; 837 return skb;
876} 838}
877 839
878static struct sk_buff *ctrl_build_mcgrp_msg(struct genl_multicast_group *grp, 840static struct sk_buff *
879 u32 portid, int seq, u8 cmd) 841ctrl_build_mcgrp_msg(struct genl_family *family,
842 const struct genl_multicast_group *grp,
843 int grp_id, u32 portid, int seq, u8 cmd)
880{ 844{
881 struct sk_buff *skb; 845 struct sk_buff *skb;
882 int err; 846 int err;
@@ -885,7 +849,8 @@ static struct sk_buff *ctrl_build_mcgrp_msg(struct genl_multicast_group *grp,
885 if (skb == NULL) 849 if (skb == NULL)
886 return ERR_PTR(-ENOBUFS); 850 return ERR_PTR(-ENOBUFS);
887 851
888 err = ctrl_fill_mcgrp_info(grp, portid, seq, 0, skb, cmd); 852 err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
853 seq, 0, skb, cmd);
889 if (err < 0) { 854 if (err < 0) {
890 nlmsg_free(skb); 855 nlmsg_free(skb);
891 return ERR_PTR(err); 856 return ERR_PTR(err);
@@ -947,11 +912,11 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
947 return genlmsg_reply(msg, info); 912 return genlmsg_reply(msg, info);
948} 913}
949 914
950static int genl_ctrl_event(int event, void *data) 915static int genl_ctrl_event(int event, struct genl_family *family,
916 const struct genl_multicast_group *grp,
917 int grp_id)
951{ 918{
952 struct sk_buff *msg; 919 struct sk_buff *msg;
953 struct genl_family *family;
954 struct genl_multicast_group *grp;
955 920
956 /* genl is still initialising */ 921 /* genl is still initialising */
957 if (!init_net.genl_sock) 922 if (!init_net.genl_sock)
@@ -960,14 +925,13 @@ static int genl_ctrl_event(int event, void *data)
960 switch (event) { 925 switch (event) {
961 case CTRL_CMD_NEWFAMILY: 926 case CTRL_CMD_NEWFAMILY:
962 case CTRL_CMD_DELFAMILY: 927 case CTRL_CMD_DELFAMILY:
963 family = data; 928 WARN_ON(grp);
964 msg = ctrl_build_family_msg(family, 0, 0, event); 929 msg = ctrl_build_family_msg(family, 0, 0, event);
965 break; 930 break;
966 case CTRL_CMD_NEWMCAST_GRP: 931 case CTRL_CMD_NEWMCAST_GRP:
967 case CTRL_CMD_DELMCAST_GRP: 932 case CTRL_CMD_DELMCAST_GRP:
968 grp = data; 933 BUG_ON(!grp);
969 family = grp->family; 934 msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
970 msg = ctrl_build_mcgrp_msg(data, 0, 0, event);
971 break; 935 break;
972 default: 936 default:
973 return -EINVAL; 937 return -EINVAL;
@@ -977,26 +941,29 @@ static int genl_ctrl_event(int event, void *data)
977 return PTR_ERR(msg); 941 return PTR_ERR(msg);
978 942
979 if (!family->netnsok) { 943 if (!family->netnsok) {
980 genlmsg_multicast_netns(&init_net, msg, 0, 944 genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
981 GENL_ID_CTRL, GFP_KERNEL); 945 0, GFP_KERNEL);
982 } else { 946 } else {
983 rcu_read_lock(); 947 rcu_read_lock();
984 genlmsg_multicast_allns(msg, 0, GENL_ID_CTRL, GFP_ATOMIC); 948 genlmsg_multicast_allns(&genl_ctrl, msg, 0,
949 0, GFP_ATOMIC);
985 rcu_read_unlock(); 950 rcu_read_unlock();
986 } 951 }
987 952
988 return 0; 953 return 0;
989} 954}
990 955
991static struct genl_ops genl_ctrl_ops = { 956static struct genl_ops genl_ctrl_ops[] = {
992 .cmd = CTRL_CMD_GETFAMILY, 957 {
993 .doit = ctrl_getfamily, 958 .cmd = CTRL_CMD_GETFAMILY,
994 .dumpit = ctrl_dumpfamily, 959 .doit = ctrl_getfamily,
995 .policy = ctrl_policy, 960 .dumpit = ctrl_dumpfamily,
961 .policy = ctrl_policy,
962 },
996}; 963};
997 964
998static struct genl_multicast_group notify_grp = { 965static struct genl_multicast_group genl_ctrl_groups[] = {
999 .name = "notify", 966 { .name = "notify", },
1000}; 967};
1001 968
1002static int __net_init genl_pernet_init(struct net *net) 969static int __net_init genl_pernet_init(struct net *net)
@@ -1036,7 +1003,8 @@ static int __init genl_init(void)
1036 for (i = 0; i < GENL_FAM_TAB_SIZE; i++) 1003 for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
1037 INIT_LIST_HEAD(&family_ht[i]); 1004 INIT_LIST_HEAD(&family_ht[i]);
1038 1005
1039 err = genl_register_family_with_ops(&genl_ctrl, &genl_ctrl_ops, 1); 1006 err = genl_register_family_with_ops_groups(&genl_ctrl, genl_ctrl_ops,
1007 genl_ctrl_groups);
1040 if (err < 0) 1008 if (err < 0)
1041 goto problem; 1009 goto problem;
1042 1010
@@ -1044,10 +1012,6 @@ static int __init genl_init(void)
1044 if (err) 1012 if (err)
1045 goto problem; 1013 goto problem;
1046 1014
1047 err = genl_register_mc_group(&genl_ctrl, &notify_grp);
1048 if (err < 0)
1049 goto problem;
1050
1051 return 0; 1015 return 0;
1052 1016
1053problem: 1017problem:
@@ -1085,14 +1049,18 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
1085 return err; 1049 return err;
1086} 1050}
1087 1051
1088int genlmsg_multicast_allns(struct sk_buff *skb, u32 portid, unsigned int group, 1052int genlmsg_multicast_allns(struct genl_family *family, struct sk_buff *skb,
1089 gfp_t flags) 1053 u32 portid, unsigned int group, gfp_t flags)
1090{ 1054{
1055 if (WARN_ON_ONCE(group >= family->n_mcgrps))
1056 return -EINVAL;
1057 group = family->mcgrp_offset + group;
1091 return genlmsg_mcast(skb, portid, group, flags); 1058 return genlmsg_mcast(skb, portid, group, flags);
1092} 1059}
1093EXPORT_SYMBOL(genlmsg_multicast_allns); 1060EXPORT_SYMBOL(genlmsg_multicast_allns);
1094 1061
1095void genl_notify(struct sk_buff *skb, struct net *net, u32 portid, u32 group, 1062void genl_notify(struct genl_family *family,
1063 struct sk_buff *skb, struct net *net, u32 portid, u32 group,
1096 struct nlmsghdr *nlh, gfp_t flags) 1064 struct nlmsghdr *nlh, gfp_t flags)
1097{ 1065{
1098 struct sock *sk = net->genl_sock; 1066 struct sock *sk = net->genl_sock;
@@ -1101,6 +1069,9 @@ void genl_notify(struct sk_buff *skb, struct net *net, u32 portid, u32 group,
1101 if (nlh) 1069 if (nlh)
1102 report = nlmsg_report(nlh); 1070 report = nlmsg_report(nlh);
1103 1071
1072 if (WARN_ON_ONCE(group >= family->n_mcgrps))
1073 return;
1074 group = family->mcgrp_offset + group;
1104 nlmsg_notify(sk, skb, portid, group, report, flags); 1075 nlmsg_notify(sk, skb, portid, group, report, flags);
1105} 1076}
1106EXPORT_SYMBOL(genl_notify); 1077EXPORT_SYMBOL(genl_notify);
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 698814bfa7ad..53c19a35fc6d 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1179,10 +1179,9 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
1179 sax->sax25_family = AF_NETROM; 1179 sax->sax25_family = AF_NETROM;
1180 skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call, 1180 skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
1181 AX25_ADDR_LEN); 1181 AX25_ADDR_LEN);
1182 msg->msg_namelen = sizeof(*sax);
1182 } 1183 }
1183 1184
1184 msg->msg_namelen = sizeof(*sax);
1185
1186 skb_free_datagram(sk, skb); 1185 skb_free_datagram(sk, skb);
1187 1186
1188 release_sock(sk); 1187 release_sock(sk);
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index d308402b67d8..824c6056bf82 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -807,8 +807,6 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
807 807
808 pr_debug("%p %zu\n", sk, len); 808 pr_debug("%p %zu\n", sk, len);
809 809
810 msg->msg_namelen = 0;
811
812 lock_sock(sk); 810 lock_sock(sk);
813 811
814 if (sk->sk_state == LLCP_CLOSED && 812 if (sk->sk_state == LLCP_CLOSED &&
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 84b7e3ea7b7a..a9b2342d5253 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -30,8 +30,8 @@
30#include "nfc.h" 30#include "nfc.h"
31#include "llcp.h" 31#include "llcp.h"
32 32
33static struct genl_multicast_group nfc_genl_event_mcgrp = { 33static const struct genl_multicast_group nfc_genl_mcgrps[] = {
34 .name = NFC_GENL_MCAST_EVENT_NAME, 34 { .name = NFC_GENL_MCAST_EVENT_NAME, },
35}; 35};
36 36
37static struct genl_family nfc_genl_family = { 37static struct genl_family nfc_genl_family = {
@@ -194,7 +194,7 @@ int nfc_genl_targets_found(struct nfc_dev *dev)
194 194
195 genlmsg_end(msg, hdr); 195 genlmsg_end(msg, hdr);
196 196
197 return genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC); 197 return genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC);
198 198
199nla_put_failure: 199nla_put_failure:
200 genlmsg_cancel(msg, hdr); 200 genlmsg_cancel(msg, hdr);
@@ -223,7 +223,7 @@ int nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx)
223 223
224 genlmsg_end(msg, hdr); 224 genlmsg_end(msg, hdr);
225 225
226 genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); 226 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL);
227 227
228 return 0; 228 return 0;
229 229
@@ -255,7 +255,7 @@ int nfc_genl_tm_activated(struct nfc_dev *dev, u32 protocol)
255 255
256 genlmsg_end(msg, hdr); 256 genlmsg_end(msg, hdr);
257 257
258 genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); 258 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL);
259 259
260 return 0; 260 return 0;
261 261
@@ -285,7 +285,7 @@ int nfc_genl_tm_deactivated(struct nfc_dev *dev)
285 285
286 genlmsg_end(msg, hdr); 286 genlmsg_end(msg, hdr);
287 287
288 genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); 288 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL);
289 289
290 return 0; 290 return 0;
291 291
@@ -318,7 +318,7 @@ int nfc_genl_device_added(struct nfc_dev *dev)
318 318
319 genlmsg_end(msg, hdr); 319 genlmsg_end(msg, hdr);
320 320
321 genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); 321 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL);
322 322
323 return 0; 323 return 0;
324 324
@@ -348,7 +348,7 @@ int nfc_genl_device_removed(struct nfc_dev *dev)
348 348
349 genlmsg_end(msg, hdr); 349 genlmsg_end(msg, hdr);
350 350
351 genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); 351 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL);
352 352
353 return 0; 353 return 0;
354 354
@@ -414,7 +414,7 @@ int nfc_genl_llc_send_sdres(struct nfc_dev *dev, struct hlist_head *sdres_list)
414 414
415 genlmsg_end(msg, hdr); 415 genlmsg_end(msg, hdr);
416 416
417 return genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC); 417 return genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC);
418 418
419nla_put_failure: 419nla_put_failure:
420 genlmsg_cancel(msg, hdr); 420 genlmsg_cancel(msg, hdr);
@@ -448,7 +448,7 @@ int nfc_genl_se_added(struct nfc_dev *dev, u32 se_idx, u16 type)
448 448
449 genlmsg_end(msg, hdr); 449 genlmsg_end(msg, hdr);
450 450
451 genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); 451 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL);
452 452
453 return 0; 453 return 0;
454 454
@@ -479,7 +479,7 @@ int nfc_genl_se_removed(struct nfc_dev *dev, u32 se_idx)
479 479
480 genlmsg_end(msg, hdr); 480 genlmsg_end(msg, hdr);
481 481
482 genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); 482 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL);
483 483
484 return 0; 484 return 0;
485 485
@@ -600,7 +600,7 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
600 600
601 dev->dep_link_up = true; 601 dev->dep_link_up = true;
602 602
603 genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC); 603 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC);
604 604
605 return 0; 605 return 0;
606 606
@@ -632,7 +632,7 @@ int nfc_genl_dep_link_down_event(struct nfc_dev *dev)
632 632
633 genlmsg_end(msg, hdr); 633 genlmsg_end(msg, hdr);
634 634
635 genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC); 635 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC);
636 636
637 return 0; 637 return 0;
638 638
@@ -1137,7 +1137,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
1137 1137
1138 genlmsg_end(msg, hdr); 1138 genlmsg_end(msg, hdr);
1139 1139
1140 genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); 1140 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL);
1141 1141
1142 return 0; 1142 return 0;
1143 1143
@@ -1308,7 +1308,7 @@ static void se_io_cb(void *context, u8 *apdu, size_t apdu_len, int err)
1308 1308
1309 genlmsg_end(msg, hdr); 1309 genlmsg_end(msg, hdr);
1310 1310
1311 genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); 1311 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL);
1312 1312
1313 kfree(ctx); 1313 kfree(ctx);
1314 1314
@@ -1364,7 +1364,7 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info)
1364 return dev->ops->se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx); 1364 return dev->ops->se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx);
1365} 1365}
1366 1366
1367static struct genl_ops nfc_genl_ops[] = { 1367static const struct genl_ops nfc_genl_ops[] = {
1368 { 1368 {
1369 .cmd = NFC_CMD_GET_DEVICE, 1369 .cmd = NFC_CMD_GET_DEVICE,
1370 .doit = nfc_genl_get_device, 1370 .doit = nfc_genl_get_device,
@@ -1536,16 +1536,15 @@ int __init nfc_genl_init(void)
1536{ 1536{
1537 int rc; 1537 int rc;
1538 1538
1539 rc = genl_register_family_with_ops(&nfc_genl_family, nfc_genl_ops, 1539 rc = genl_register_family_with_ops_groups(&nfc_genl_family,
1540 ARRAY_SIZE(nfc_genl_ops)); 1540 nfc_genl_ops,
1541 nfc_genl_mcgrps);
1541 if (rc) 1542 if (rc)
1542 return rc; 1543 return rc;
1543 1544
1544 rc = genl_register_mc_group(&nfc_genl_family, &nfc_genl_event_mcgrp);
1545
1546 netlink_register_notifier(&nl_notifier); 1545 netlink_register_notifier(&nl_notifier);
1547 1546
1548 return rc; 1547 return 0;
1549} 1548}
1550 1549
1551/** 1550/**
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index cd958b381f96..66bcd2eb5773 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -244,8 +244,6 @@ static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
244 if (!skb) 244 if (!skb)
245 return rc; 245 return rc;
246 246
247 msg->msg_namelen = 0;
248
249 copied = skb->len; 247 copied = skb->len;
250 if (len < copied) { 248 if (len < copied) {
251 msg->msg_flags |= MSG_TRUNC; 249 msg->msg_flags |= MSG_TRUNC;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 449e0776a2c0..6f5e1dd3be2d 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -61,11 +61,11 @@
61 61
62int ovs_net_id __read_mostly; 62int ovs_net_id __read_mostly;
63 63
64static void ovs_notify(struct sk_buff *skb, struct genl_info *info, 64static void ovs_notify(struct genl_family *family,
65 struct genl_multicast_group *grp) 65 struct sk_buff *skb, struct genl_info *info)
66{ 66{
67 genl_notify(skb, genl_info_net(info), info->snd_portid, 67 genl_notify(family, skb, genl_info_net(info), info->snd_portid,
68 grp->id, info->nlhdr, GFP_KERNEL); 68 0, info->nlhdr, GFP_KERNEL);
69} 69}
70 70
71/** 71/**
@@ -557,7 +557,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
557 [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED }, 557 [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
558}; 558};
559 559
560static struct genl_ops dp_packet_genl_ops[] = { 560static const struct genl_ops dp_packet_genl_ops[] = {
561 { .cmd = OVS_PACKET_CMD_EXECUTE, 561 { .cmd = OVS_PACKET_CMD_EXECUTE,
562 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ 562 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
563 .policy = packet_policy, 563 .policy = packet_policy,
@@ -877,10 +877,10 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
877 ovs_unlock(); 877 ovs_unlock();
878 878
879 if (!IS_ERR(reply)) 879 if (!IS_ERR(reply))
880 ovs_notify(reply, info, &ovs_dp_flow_multicast_group); 880 ovs_notify(&dp_flow_genl_family, reply, info);
881 else 881 else
882 netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 882 genl_set_err(&dp_flow_genl_family, sock_net(skb->sk), 0,
883 ovs_dp_flow_multicast_group.id, PTR_ERR(reply)); 883 0, PTR_ERR(reply));
884 return 0; 884 return 0;
885 885
886err_flow_free: 886err_flow_free:
@@ -990,7 +990,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
990 ovs_flow_free(flow, true); 990 ovs_flow_free(flow, true);
991 ovs_unlock(); 991 ovs_unlock();
992 992
993 ovs_notify(reply, info, &ovs_dp_flow_multicast_group); 993 ovs_notify(&dp_flow_genl_family, reply, info);
994 return 0; 994 return 0;
995unlock: 995unlock:
996 ovs_unlock(); 996 ovs_unlock();
@@ -1034,7 +1034,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1034 return skb->len; 1034 return skb->len;
1035} 1035}
1036 1036
1037static struct genl_ops dp_flow_genl_ops[] = { 1037static const struct genl_ops dp_flow_genl_ops[] = {
1038 { .cmd = OVS_FLOW_CMD_NEW, 1038 { .cmd = OVS_FLOW_CMD_NEW,
1039 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ 1039 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1040 .policy = flow_policy, 1040 .policy = flow_policy,
@@ -1243,7 +1243,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1243 1243
1244 ovs_unlock(); 1244 ovs_unlock();
1245 1245
1246 ovs_notify(reply, info, &ovs_dp_datapath_multicast_group); 1246 ovs_notify(&dp_datapath_genl_family, reply, info);
1247 return 0; 1247 return 0;
1248 1248
1249err_destroy_local_port: 1249err_destroy_local_port:
@@ -1308,7 +1308,7 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1308 __dp_destroy(dp); 1308 __dp_destroy(dp);
1309 ovs_unlock(); 1309 ovs_unlock();
1310 1310
1311 ovs_notify(reply, info, &ovs_dp_datapath_multicast_group); 1311 ovs_notify(&dp_datapath_genl_family, reply, info);
1312 1312
1313 return 0; 1313 return 0;
1314unlock: 1314unlock:
@@ -1332,14 +1332,14 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1332 info->snd_seq, OVS_DP_CMD_NEW); 1332 info->snd_seq, OVS_DP_CMD_NEW);
1333 if (IS_ERR(reply)) { 1333 if (IS_ERR(reply)) {
1334 err = PTR_ERR(reply); 1334 err = PTR_ERR(reply);
1335 netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 1335 genl_set_err(&dp_datapath_genl_family, sock_net(skb->sk), 0,
1336 ovs_dp_datapath_multicast_group.id, err); 1336 0, err);
1337 err = 0; 1337 err = 0;
1338 goto unlock; 1338 goto unlock;
1339 } 1339 }
1340 1340
1341 ovs_unlock(); 1341 ovs_unlock();
1342 ovs_notify(reply, info, &ovs_dp_datapath_multicast_group); 1342 ovs_notify(&dp_datapath_genl_family, reply, info);
1343 1343
1344 return 0; 1344 return 0;
1345unlock: 1345unlock:
@@ -1398,7 +1398,7 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1398 return skb->len; 1398 return skb->len;
1399} 1399}
1400 1400
1401static struct genl_ops dp_datapath_genl_ops[] = { 1401static const struct genl_ops dp_datapath_genl_ops[] = {
1402 { .cmd = OVS_DP_CMD_NEW, 1402 { .cmd = OVS_DP_CMD_NEW,
1403 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ 1403 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1404 .policy = datapath_policy, 1404 .policy = datapath_policy,
@@ -1431,7 +1431,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
1431 [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED }, 1431 [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
1432}; 1432};
1433 1433
1434static struct genl_family dp_vport_genl_family = { 1434struct genl_family dp_vport_genl_family = {
1435 .id = GENL_ID_GENERATE, 1435 .id = GENL_ID_GENERATE,
1436 .hdrsize = sizeof(struct ovs_header), 1436 .hdrsize = sizeof(struct ovs_header),
1437 .name = OVS_VPORT_FAMILY, 1437 .name = OVS_VPORT_FAMILY,
@@ -1601,7 +1601,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1601 goto exit_unlock; 1601 goto exit_unlock;
1602 } 1602 }
1603 1603
1604 ovs_notify(reply, info, &ovs_dp_vport_multicast_group); 1604 ovs_notify(&dp_vport_genl_family, reply, info);
1605 1605
1606exit_unlock: 1606exit_unlock:
1607 ovs_unlock(); 1607 ovs_unlock();
@@ -1648,7 +1648,7 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1648 BUG_ON(err < 0); 1648 BUG_ON(err < 0);
1649 1649
1650 ovs_unlock(); 1650 ovs_unlock();
1651 ovs_notify(reply, info, &ovs_dp_vport_multicast_group); 1651 ovs_notify(&dp_vport_genl_family, reply, info);
1652 return 0; 1652 return 0;
1653 1653
1654exit_free: 1654exit_free:
@@ -1685,7 +1685,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
1685 err = 0; 1685 err = 0;
1686 ovs_dp_detach_port(vport); 1686 ovs_dp_detach_port(vport);
1687 1687
1688 ovs_notify(reply, info, &ovs_dp_vport_multicast_group); 1688 ovs_notify(&dp_vport_genl_family, reply, info);
1689 1689
1690exit_unlock: 1690exit_unlock:
1691 ovs_unlock(); 1691 ovs_unlock();
@@ -1759,7 +1759,7 @@ out:
1759 return skb->len; 1759 return skb->len;
1760} 1760}
1761 1761
1762static struct genl_ops dp_vport_genl_ops[] = { 1762static const struct genl_ops dp_vport_genl_ops[] = {
1763 { .cmd = OVS_VPORT_CMD_NEW, 1763 { .cmd = OVS_VPORT_CMD_NEW,
1764 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ 1764 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1765 .policy = vport_policy, 1765 .policy = vport_policy,
@@ -1785,9 +1785,9 @@ static struct genl_ops dp_vport_genl_ops[] = {
1785 1785
1786struct genl_family_and_ops { 1786struct genl_family_and_ops {
1787 struct genl_family *family; 1787 struct genl_family *family;
1788 struct genl_ops *ops; 1788 const struct genl_ops *ops;
1789 int n_ops; 1789 int n_ops;
1790 struct genl_multicast_group *group; 1790 const struct genl_multicast_group *group;
1791}; 1791};
1792 1792
1793static const struct genl_family_and_ops dp_genl_families[] = { 1793static const struct genl_family_and_ops dp_genl_families[] = {
@@ -1823,17 +1823,14 @@ static int dp_register_genl(void)
1823 for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) { 1823 for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
1824 const struct genl_family_and_ops *f = &dp_genl_families[i]; 1824 const struct genl_family_and_ops *f = &dp_genl_families[i];
1825 1825
1826 err = genl_register_family_with_ops(f->family, f->ops, 1826 f->family->ops = f->ops;
1827 f->n_ops); 1827 f->family->n_ops = f->n_ops;
1828 f->family->mcgrps = f->group;
1829 f->family->n_mcgrps = f->group ? 1 : 0;
1830 err = genl_register_family(f->family);
1828 if (err) 1831 if (err)
1829 goto error; 1832 goto error;
1830 n_registered++; 1833 n_registered++;
1831
1832 if (f->group) {
1833 err = genl_register_mc_group(f->family, f->group);
1834 if (err)
1835 goto error;
1836 }
1837 } 1834 }
1838 1835
1839 return 0; 1836 return 0;
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index d3d14a58aa91..4067ea41be28 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -177,6 +177,7 @@ static inline struct vport *ovs_vport_ovsl(const struct datapath *dp, int port_n
177} 177}
178 178
179extern struct notifier_block ovs_dp_device_notifier; 179extern struct notifier_block ovs_dp_device_notifier;
180extern struct genl_family dp_vport_genl_family;
180extern struct genl_multicast_group ovs_dp_vport_multicast_group; 181extern struct genl_multicast_group ovs_dp_vport_multicast_group;
181 182
182void ovs_dp_process_received_packet(struct vport *, struct sk_buff *); 183void ovs_dp_process_received_packet(struct vport *, struct sk_buff *);
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
index 5c2dab276109..2c631fe76be1 100644
--- a/net/openvswitch/dp_notify.c
+++ b/net/openvswitch/dp_notify.c
@@ -34,15 +34,14 @@ static void dp_detach_port_notify(struct vport *vport)
34 OVS_VPORT_CMD_DEL); 34 OVS_VPORT_CMD_DEL);
35 ovs_dp_detach_port(vport); 35 ovs_dp_detach_port(vport);
36 if (IS_ERR(notify)) { 36 if (IS_ERR(notify)) {
37 netlink_set_err(ovs_dp_get_net(dp)->genl_sock, 0, 37 genl_set_err(&dp_vport_genl_family, ovs_dp_get_net(dp), 0,
38 ovs_dp_vport_multicast_group.id, 38 0, PTR_ERR(notify));
39 PTR_ERR(notify));
40 return; 39 return;
41 } 40 }
42 41
43 genlmsg_multicast_netns(ovs_dp_get_net(dp), notify, 0, 42 genlmsg_multicast_netns(&dp_vport_genl_family,
44 ovs_dp_vport_multicast_group.id, 43 ovs_dp_get_net(dp), notify, 0,
45 GFP_KERNEL); 44 0, GFP_KERNEL);
46} 45}
47 46
48void ovs_dp_notify_wq(struct work_struct *work) 47void ovs_dp_notify_wq(struct work_struct *work)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 2e8286b47c28..88cfbc189558 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -237,6 +237,30 @@ struct packet_skb_cb {
237static void __fanout_unlink(struct sock *sk, struct packet_sock *po); 237static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
238static void __fanout_link(struct sock *sk, struct packet_sock *po); 238static void __fanout_link(struct sock *sk, struct packet_sock *po);
239 239
240static struct net_device *packet_cached_dev_get(struct packet_sock *po)
241{
242 struct net_device *dev;
243
244 rcu_read_lock();
245 dev = rcu_dereference(po->cached_dev);
246 if (likely(dev))
247 dev_hold(dev);
248 rcu_read_unlock();
249
250 return dev;
251}
252
253static void packet_cached_dev_assign(struct packet_sock *po,
254 struct net_device *dev)
255{
256 rcu_assign_pointer(po->cached_dev, dev);
257}
258
259static void packet_cached_dev_reset(struct packet_sock *po)
260{
261 RCU_INIT_POINTER(po->cached_dev, NULL);
262}
263
240/* register_prot_hook must be invoked with the po->bind_lock held, 264/* register_prot_hook must be invoked with the po->bind_lock held,
241 * or from a context in which asynchronous accesses to the packet 265 * or from a context in which asynchronous accesses to the packet
242 * socket is not possible (packet_create()). 266 * socket is not possible (packet_create()).
@@ -244,11 +268,13 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
244static void register_prot_hook(struct sock *sk) 268static void register_prot_hook(struct sock *sk)
245{ 269{
246 struct packet_sock *po = pkt_sk(sk); 270 struct packet_sock *po = pkt_sk(sk);
271
247 if (!po->running) { 272 if (!po->running) {
248 if (po->fanout) 273 if (po->fanout)
249 __fanout_link(sk, po); 274 __fanout_link(sk, po);
250 else 275 else
251 dev_add_pack(&po->prot_hook); 276 dev_add_pack(&po->prot_hook);
277
252 sock_hold(sk); 278 sock_hold(sk);
253 po->running = 1; 279 po->running = 1;
254 } 280 }
@@ -266,10 +292,12 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
266 struct packet_sock *po = pkt_sk(sk); 292 struct packet_sock *po = pkt_sk(sk);
267 293
268 po->running = 0; 294 po->running = 0;
295
269 if (po->fanout) 296 if (po->fanout)
270 __fanout_unlink(sk, po); 297 __fanout_unlink(sk, po);
271 else 298 else
272 __dev_remove_pack(&po->prot_hook); 299 __dev_remove_pack(&po->prot_hook);
300
273 __sock_put(sk); 301 __sock_put(sk);
274 302
275 if (sync) { 303 if (sync) {
@@ -432,9 +460,9 @@ static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
432 460
433 pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc; 461 pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
434 462
435 spin_lock(&rb_queue->lock); 463 spin_lock_bh(&rb_queue->lock);
436 pkc->delete_blk_timer = 1; 464 pkc->delete_blk_timer = 1;
437 spin_unlock(&rb_queue->lock); 465 spin_unlock_bh(&rb_queue->lock);
438 466
439 prb_del_retire_blk_timer(pkc); 467 prb_del_retire_blk_timer(pkc);
440} 468}
@@ -2057,7 +2085,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2057 struct sk_buff *skb; 2085 struct sk_buff *skb;
2058 struct net_device *dev; 2086 struct net_device *dev;
2059 __be16 proto; 2087 __be16 proto;
2060 bool need_rls_dev = false;
2061 int err, reserve = 0; 2088 int err, reserve = 0;
2062 void *ph; 2089 void *ph;
2063 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name; 2090 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
@@ -2069,8 +2096,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2069 2096
2070 mutex_lock(&po->pg_vec_lock); 2097 mutex_lock(&po->pg_vec_lock);
2071 2098
2072 if (saddr == NULL) { 2099 if (likely(saddr == NULL)) {
2073 dev = po->prot_hook.dev; 2100 dev = packet_cached_dev_get(po);
2074 proto = po->num; 2101 proto = po->num;
2075 addr = NULL; 2102 addr = NULL;
2076 } else { 2103 } else {
@@ -2084,19 +2111,17 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2084 proto = saddr->sll_protocol; 2111 proto = saddr->sll_protocol;
2085 addr = saddr->sll_addr; 2112 addr = saddr->sll_addr;
2086 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); 2113 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2087 need_rls_dev = true;
2088 } 2114 }
2089 2115
2090 err = -ENXIO; 2116 err = -ENXIO;
2091 if (unlikely(dev == NULL)) 2117 if (unlikely(dev == NULL))
2092 goto out; 2118 goto out;
2093
2094 reserve = dev->hard_header_len;
2095
2096 err = -ENETDOWN; 2119 err = -ENETDOWN;
2097 if (unlikely(!(dev->flags & IFF_UP))) 2120 if (unlikely(!(dev->flags & IFF_UP)))
2098 goto out_put; 2121 goto out_put;
2099 2122
2123 reserve = dev->hard_header_len;
2124
2100 size_max = po->tx_ring.frame_size 2125 size_max = po->tx_ring.frame_size
2101 - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); 2126 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2102 2127
@@ -2173,8 +2198,7 @@ out_status:
2173 __packet_set_status(po, ph, status); 2198 __packet_set_status(po, ph, status);
2174 kfree_skb(skb); 2199 kfree_skb(skb);
2175out_put: 2200out_put:
2176 if (need_rls_dev) 2201 dev_put(dev);
2177 dev_put(dev);
2178out: 2202out:
2179 mutex_unlock(&po->pg_vec_lock); 2203 mutex_unlock(&po->pg_vec_lock);
2180 return err; 2204 return err;
@@ -2212,7 +2236,6 @@ static int packet_snd(struct socket *sock,
2212 struct sk_buff *skb; 2236 struct sk_buff *skb;
2213 struct net_device *dev; 2237 struct net_device *dev;
2214 __be16 proto; 2238 __be16 proto;
2215 bool need_rls_dev = false;
2216 unsigned char *addr; 2239 unsigned char *addr;
2217 int err, reserve = 0; 2240 int err, reserve = 0;
2218 struct virtio_net_hdr vnet_hdr = { 0 }; 2241 struct virtio_net_hdr vnet_hdr = { 0 };
@@ -2227,8 +2250,8 @@ static int packet_snd(struct socket *sock,
2227 * Get and verify the address. 2250 * Get and verify the address.
2228 */ 2251 */
2229 2252
2230 if (saddr == NULL) { 2253 if (likely(saddr == NULL)) {
2231 dev = po->prot_hook.dev; 2254 dev = packet_cached_dev_get(po);
2232 proto = po->num; 2255 proto = po->num;
2233 addr = NULL; 2256 addr = NULL;
2234 } else { 2257 } else {
@@ -2240,19 +2263,17 @@ static int packet_snd(struct socket *sock,
2240 proto = saddr->sll_protocol; 2263 proto = saddr->sll_protocol;
2241 addr = saddr->sll_addr; 2264 addr = saddr->sll_addr;
2242 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); 2265 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2243 need_rls_dev = true;
2244 } 2266 }
2245 2267
2246 err = -ENXIO; 2268 err = -ENXIO;
2247 if (dev == NULL) 2269 if (unlikely(dev == NULL))
2248 goto out_unlock; 2270 goto out_unlock;
2249 if (sock->type == SOCK_RAW)
2250 reserve = dev->hard_header_len;
2251
2252 err = -ENETDOWN; 2271 err = -ENETDOWN;
2253 if (!(dev->flags & IFF_UP)) 2272 if (unlikely(!(dev->flags & IFF_UP)))
2254 goto out_unlock; 2273 goto out_unlock;
2255 2274
2275 if (sock->type == SOCK_RAW)
2276 reserve = dev->hard_header_len;
2256 if (po->has_vnet_hdr) { 2277 if (po->has_vnet_hdr) {
2257 vnet_hdr_len = sizeof(vnet_hdr); 2278 vnet_hdr_len = sizeof(vnet_hdr);
2258 2279
@@ -2386,15 +2407,14 @@ static int packet_snd(struct socket *sock,
2386 if (err > 0 && (err = net_xmit_errno(err)) != 0) 2407 if (err > 0 && (err = net_xmit_errno(err)) != 0)
2387 goto out_unlock; 2408 goto out_unlock;
2388 2409
2389 if (need_rls_dev) 2410 dev_put(dev);
2390 dev_put(dev);
2391 2411
2392 return len; 2412 return len;
2393 2413
2394out_free: 2414out_free:
2395 kfree_skb(skb); 2415 kfree_skb(skb);
2396out_unlock: 2416out_unlock:
2397 if (dev && need_rls_dev) 2417 if (dev)
2398 dev_put(dev); 2418 dev_put(dev);
2399out: 2419out:
2400 return err; 2420 return err;
@@ -2439,6 +2459,8 @@ static int packet_release(struct socket *sock)
2439 2459
2440 spin_lock(&po->bind_lock); 2460 spin_lock(&po->bind_lock);
2441 unregister_prot_hook(sk, false); 2461 unregister_prot_hook(sk, false);
2462 packet_cached_dev_reset(po);
2463
2442 if (po->prot_hook.dev) { 2464 if (po->prot_hook.dev) {
2443 dev_put(po->prot_hook.dev); 2465 dev_put(po->prot_hook.dev);
2444 po->prot_hook.dev = NULL; 2466 po->prot_hook.dev = NULL;
@@ -2494,14 +2516,17 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
2494 2516
2495 spin_lock(&po->bind_lock); 2517 spin_lock(&po->bind_lock);
2496 unregister_prot_hook(sk, true); 2518 unregister_prot_hook(sk, true);
2519
2497 po->num = protocol; 2520 po->num = protocol;
2498 po->prot_hook.type = protocol; 2521 po->prot_hook.type = protocol;
2499 if (po->prot_hook.dev) 2522 if (po->prot_hook.dev)
2500 dev_put(po->prot_hook.dev); 2523 dev_put(po->prot_hook.dev);
2501 po->prot_hook.dev = dev;
2502 2524
2525 po->prot_hook.dev = dev;
2503 po->ifindex = dev ? dev->ifindex : 0; 2526 po->ifindex = dev ? dev->ifindex : 0;
2504 2527
2528 packet_cached_dev_assign(po, dev);
2529
2505 if (protocol == 0) 2530 if (protocol == 0)
2506 goto out_unlock; 2531 goto out_unlock;
2507 2532
@@ -2615,6 +2640,8 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
2615 sk->sk_family = PF_PACKET; 2640 sk->sk_family = PF_PACKET;
2616 po->num = proto; 2641 po->num = proto;
2617 2642
2643 packet_cached_dev_reset(po);
2644
2618 sk->sk_destruct = packet_sock_destruct; 2645 sk->sk_destruct = packet_sock_destruct;
2619 sk_refcnt_debug_inc(sk); 2646 sk_refcnt_debug_inc(sk);
2620 2647
@@ -2660,7 +2687,6 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
2660 struct sock *sk = sock->sk; 2687 struct sock *sk = sock->sk;
2661 struct sk_buff *skb; 2688 struct sk_buff *skb;
2662 int copied, err; 2689 int copied, err;
2663 struct sockaddr_ll *sll;
2664 int vnet_hdr_len = 0; 2690 int vnet_hdr_len = 0;
2665 2691
2666 err = -EINVAL; 2692 err = -EINVAL;
@@ -2744,22 +2770,10 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
2744 goto out_free; 2770 goto out_free;
2745 } 2771 }
2746 2772
2747 /* 2773 /* You lose any data beyond the buffer you gave. If it worries
2748 * If the address length field is there to be filled in, we fill 2774 * a user program they can ask the device for its MTU
2749 * it in now. 2775 * anyway.
2750 */ 2776 */
2751
2752 sll = &PACKET_SKB_CB(skb)->sa.ll;
2753 if (sock->type == SOCK_PACKET)
2754 msg->msg_namelen = sizeof(struct sockaddr_pkt);
2755 else
2756 msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
2757
2758 /*
2759 * You lose any data beyond the buffer you gave. If it worries a
2760 * user program they can ask the device for its MTU anyway.
2761 */
2762
2763 copied = skb->len; 2777 copied = skb->len;
2764 if (copied > len) { 2778 if (copied > len) {
2765 copied = len; 2779 copied = len;
@@ -2772,9 +2786,20 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
2772 2786
2773 sock_recv_ts_and_drops(msg, sk, skb); 2787 sock_recv_ts_and_drops(msg, sk, skb);
2774 2788
2775 if (msg->msg_name) 2789 if (msg->msg_name) {
2790 /* If the address length field is there to be filled
2791 * in, we fill it in now.
2792 */
2793 if (sock->type == SOCK_PACKET) {
2794 msg->msg_namelen = sizeof(struct sockaddr_pkt);
2795 } else {
2796 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
2797 msg->msg_namelen = sll->sll_halen +
2798 offsetof(struct sockaddr_ll, sll_addr);
2799 }
2776 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, 2800 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
2777 msg->msg_namelen); 2801 msg->msg_namelen);
2802 }
2778 2803
2779 if (pkt_sk(sk)->auxdata) { 2804 if (pkt_sk(sk)->auxdata) {
2780 struct tpacket_auxdata aux; 2805 struct tpacket_auxdata aux;
@@ -3326,6 +3351,7 @@ static int packet_notifier(struct notifier_block *this,
3326 sk->sk_error_report(sk); 3351 sk->sk_error_report(sk);
3327 } 3352 }
3328 if (msg == NETDEV_UNREGISTER) { 3353 if (msg == NETDEV_UNREGISTER) {
3354 packet_cached_dev_reset(po);
3329 po->ifindex = -1; 3355 po->ifindex = -1;
3330 if (po->prot_hook.dev) 3356 if (po->prot_hook.dev)
3331 dev_put(po->prot_hook.dev); 3357 dev_put(po->prot_hook.dev);
diff --git a/net/packet/internal.h b/net/packet/internal.h
index c4e4b4561207..1035fa2d909c 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -113,6 +113,7 @@ struct packet_sock {
113 unsigned int tp_loss:1; 113 unsigned int tp_loss:1;
114 unsigned int tp_tx_has_off:1; 114 unsigned int tp_tx_has_off:1;
115 unsigned int tp_tstamp; 115 unsigned int tp_tstamp;
116 struct net_device __rcu *cached_dev;
116 struct packet_type prot_hook ____cacheline_aligned_in_smp; 117 struct packet_type prot_hook ____cacheline_aligned_in_smp;
117}; 118};
118 119
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index 12c30f3e643e..38946b26e471 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -139,9 +139,6 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
139 MSG_CMSG_COMPAT)) 139 MSG_CMSG_COMPAT))
140 goto out_nofree; 140 goto out_nofree;
141 141
142 if (addr_len)
143 *addr_len = sizeof(sa);
144
145 skb = skb_recv_datagram(sk, flags, noblock, &rval); 142 skb = skb_recv_datagram(sk, flags, noblock, &rval);
146 if (skb == NULL) 143 if (skb == NULL)
147 goto out_nofree; 144 goto out_nofree;
@@ -162,8 +159,10 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
162 159
163 rval = (flags & MSG_TRUNC) ? skb->len : copylen; 160 rval = (flags & MSG_TRUNC) ? skb->len : copylen;
164 161
165 if (msg->msg_name != NULL) 162 if (msg->msg_name != NULL) {
166 memcpy(msg->msg_name, &sa, sizeof(struct sockaddr_pn)); 163 memcpy(msg->msg_name, &sa, sizeof(sa));
164 *addr_len = sizeof(sa);
165 }
167 166
168out: 167out:
169 skb_free_datagram(sk, skb); 168 skb_free_datagram(sk, skb);
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index e59094981175..37be6e226d1b 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -552,9 +552,8 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
552 && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { 552 && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
553 rds_cong_map_updated(conn->c_fcong, ~(u64) 0); 553 rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
554 scat = &rm->data.op_sg[sg]; 554 scat = &rm->data.op_sg[sg];
555 ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; 555 ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
556 ret = min_t(int, ret, scat->length - conn->c_xmit_data_off); 556 return sizeof(struct rds_header) + ret;
557 return ret;
558 } 557 }
559 558
560 /* FIXME we may overallocate here */ 559 /* FIXME we may overallocate here */
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 9f0f17cf6bf9..de339b24ca14 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -410,8 +410,6 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
410 410
411 rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo); 411 rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
412 412
413 msg->msg_namelen = 0;
414
415 if (msg_flags & MSG_OOB) 413 if (msg_flags & MSG_OOB)
416 goto out; 414 goto out;
417 415
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index e98fcfbe6007..33af77246bfe 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1216,7 +1216,6 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
1216{ 1216{
1217 struct sock *sk = sock->sk; 1217 struct sock *sk = sock->sk;
1218 struct rose_sock *rose = rose_sk(sk); 1218 struct rose_sock *rose = rose_sk(sk);
1219 struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
1220 size_t copied; 1219 size_t copied;
1221 unsigned char *asmptr; 1220 unsigned char *asmptr;
1222 struct sk_buff *skb; 1221 struct sk_buff *skb;
@@ -1252,8 +1251,11 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
1252 1251
1253 skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); 1252 skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1254 1253
1255 if (srose != NULL) { 1254 if (msg->msg_name) {
1256 memset(srose, 0, msg->msg_namelen); 1255 struct sockaddr_rose *srose;
1256
1257 memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
1258 srose = msg->msg_name;
1257 srose->srose_family = AF_ROSE; 1259 srose->srose_family = AF_ROSE;
1258 srose->srose_addr = rose->dest_addr; 1260 srose->srose_addr = rose->dest_addr;
1259 srose->srose_call = rose->dest_call; 1261 srose->srose_call = rose->dest_call;
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index 4b48687c3890..898492a8d61b 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -143,10 +143,13 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
143 143
144 /* copy the peer address and timestamp */ 144 /* copy the peer address and timestamp */
145 if (!continue_call) { 145 if (!continue_call) {
146 if (msg->msg_name && msg->msg_namelen > 0) 146 if (msg->msg_name) {
147 size_t len =
148 sizeof(call->conn->trans->peer->srx);
147 memcpy(msg->msg_name, 149 memcpy(msg->msg_name,
148 &call->conn->trans->peer->srx, 150 &call->conn->trans->peer->srx, len);
149 sizeof(call->conn->trans->peer->srx)); 151 msg->msg_namelen = len;
152 }
150 sock_recv_ts_and_drops(msg, &rx->sk, skb); 153 sock_recv_ts_and_drops(msg, &rx->sk, skb);
151 } 154 }
152 155
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index fd7072827a40..69cb848e8345 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -270,6 +270,16 @@ int tcf_register_action(struct tc_action_ops *act)
270{ 270{
271 struct tc_action_ops *a, **ap; 271 struct tc_action_ops *a, **ap;
272 272
273 /* Must supply act, dump, cleanup and init */
274 if (!act->act || !act->dump || !act->cleanup || !act->init)
275 return -EINVAL;
276
277 /* Supply defaults */
278 if (!act->lookup)
279 act->lookup = tcf_hash_search;
280 if (!act->walk)
281 act->walk = tcf_generic_walker;
282
273 write_lock(&act_mod_lock); 283 write_lock(&act_mod_lock);
274 for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) { 284 for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) {
275 if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) { 285 if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
@@ -381,7 +391,7 @@ int tcf_action_exec(struct sk_buff *skb, const struct tc_action *act,
381 } 391 }
382 while ((a = act) != NULL) { 392 while ((a = act) != NULL) {
383repeat: 393repeat:
384 if (a->ops && a->ops->act) { 394 if (a->ops) {
385 ret = a->ops->act(skb, a, res); 395 ret = a->ops->act(skb, a, res);
386 if (TC_MUNGED & skb->tc_verd) { 396 if (TC_MUNGED & skb->tc_verd) {
387 /* copied already, allow trampling */ 397 /* copied already, allow trampling */
@@ -405,7 +415,7 @@ void tcf_action_destroy(struct tc_action *act, int bind)
405 struct tc_action *a; 415 struct tc_action *a;
406 416
407 for (a = act; a; a = act) { 417 for (a = act; a; a = act) {
408 if (a->ops && a->ops->cleanup) { 418 if (a->ops) {
409 if (a->ops->cleanup(a, bind) == ACT_P_DELETED) 419 if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
410 module_put(a->ops->owner); 420 module_put(a->ops->owner);
411 act = act->next; 421 act = act->next;
@@ -424,7 +434,7 @@ tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
424{ 434{
425 int err = -EINVAL; 435 int err = -EINVAL;
426 436
427 if (a->ops == NULL || a->ops->dump == NULL) 437 if (a->ops == NULL)
428 return err; 438 return err;
429 return a->ops->dump(skb, a, bind, ref); 439 return a->ops->dump(skb, a, bind, ref);
430} 440}
@@ -436,7 +446,7 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
436 unsigned char *b = skb_tail_pointer(skb); 446 unsigned char *b = skb_tail_pointer(skb);
437 struct nlattr *nest; 447 struct nlattr *nest;
438 448
439 if (a->ops == NULL || a->ops->dump == NULL) 449 if (a->ops == NULL)
440 return err; 450 return err;
441 451
442 if (nla_put_string(skb, TCA_KIND, a->ops->kind)) 452 if (nla_put_string(skb, TCA_KIND, a->ops->kind))
@@ -723,8 +733,6 @@ tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid)
723 a->ops = tc_lookup_action(tb[TCA_ACT_KIND]); 733 a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
724 if (a->ops == NULL) 734 if (a->ops == NULL)
725 goto err_free; 735 goto err_free;
726 if (a->ops->lookup == NULL)
727 goto err_mod;
728 err = -ENOENT; 736 err = -ENOENT;
729 if (a->ops->lookup(a, index) == 0) 737 if (a->ops->lookup(a, index) == 0)
730 goto err_mod; 738 goto err_mod;
@@ -1084,12 +1092,6 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1084 memset(&a, 0, sizeof(struct tc_action)); 1092 memset(&a, 0, sizeof(struct tc_action));
1085 a.ops = a_o; 1093 a.ops = a_o;
1086 1094
1087 if (a_o->walk == NULL) {
1088 WARN(1, "tc_dump_action: %s !capable of dumping table\n",
1089 a_o->kind);
1090 goto out_module_put;
1091 }
1092
1093 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 1095 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1094 cb->nlh->nlmsg_type, sizeof(*t), 0); 1096 cb->nlh->nlmsg_type, sizeof(*t), 0);
1095 if (!nlh) 1097 if (!nlh)
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 3a4c0caa1f7d..5c5edf56adbd 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -585,9 +585,7 @@ static struct tc_action_ops act_csum_ops = {
585 .act = tcf_csum, 585 .act = tcf_csum,
586 .dump = tcf_csum_dump, 586 .dump = tcf_csum_dump,
587 .cleanup = tcf_csum_cleanup, 587 .cleanup = tcf_csum_cleanup,
588 .lookup = tcf_hash_search,
589 .init = tcf_csum_init, 588 .init = tcf_csum_init,
590 .walk = tcf_generic_walker
591}; 589};
592 590
593MODULE_DESCRIPTION("Checksum updating actions"); 591MODULE_DESCRIPTION("Checksum updating actions");
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index fd2b3cff5fa2..5645a4d32abd 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -206,9 +206,7 @@ static struct tc_action_ops act_gact_ops = {
206 .act = tcf_gact, 206 .act = tcf_gact,
207 .dump = tcf_gact_dump, 207 .dump = tcf_gact_dump,
208 .cleanup = tcf_gact_cleanup, 208 .cleanup = tcf_gact_cleanup,
209 .lookup = tcf_hash_search,
210 .init = tcf_gact_init, 209 .init = tcf_gact_init,
211 .walk = tcf_generic_walker
212}; 210};
213 211
214MODULE_AUTHOR("Jamal Hadi Salim(2002-4)"); 212MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 60d88b6b9560..882a89762f77 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -298,9 +298,7 @@ static struct tc_action_ops act_ipt_ops = {
298 .act = tcf_ipt, 298 .act = tcf_ipt,
299 .dump = tcf_ipt_dump, 299 .dump = tcf_ipt_dump,
300 .cleanup = tcf_ipt_cleanup, 300 .cleanup = tcf_ipt_cleanup,
301 .lookup = tcf_hash_search,
302 .init = tcf_ipt_init, 301 .init = tcf_ipt_init,
303 .walk = tcf_generic_walker
304}; 302};
305 303
306static struct tc_action_ops act_xt_ops = { 304static struct tc_action_ops act_xt_ops = {
@@ -312,9 +310,7 @@ static struct tc_action_ops act_xt_ops = {
312 .act = tcf_ipt, 310 .act = tcf_ipt,
313 .dump = tcf_ipt_dump, 311 .dump = tcf_ipt_dump,
314 .cleanup = tcf_ipt_cleanup, 312 .cleanup = tcf_ipt_cleanup,
315 .lookup = tcf_hash_search,
316 .init = tcf_ipt_init, 313 .init = tcf_ipt_init,
317 .walk = tcf_generic_walker
318}; 314};
319 315
320MODULE_AUTHOR("Jamal Hadi Salim(2002-13)"); 316MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 977c10e0631b..252378121ce7 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -271,9 +271,7 @@ static struct tc_action_ops act_mirred_ops = {
271 .act = tcf_mirred, 271 .act = tcf_mirred,
272 .dump = tcf_mirred_dump, 272 .dump = tcf_mirred_dump,
273 .cleanup = tcf_mirred_cleanup, 273 .cleanup = tcf_mirred_cleanup,
274 .lookup = tcf_hash_search,
275 .init = tcf_mirred_init, 274 .init = tcf_mirred_init,
276 .walk = tcf_generic_walker
277}; 275};
278 276
279MODULE_AUTHOR("Jamal Hadi Salim(2002)"); 277MODULE_AUTHOR("Jamal Hadi Salim(2002)");
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 876f0ef29694..6a15ace00241 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -308,9 +308,7 @@ static struct tc_action_ops act_nat_ops = {
308 .act = tcf_nat, 308 .act = tcf_nat,
309 .dump = tcf_nat_dump, 309 .dump = tcf_nat_dump,
310 .cleanup = tcf_nat_cleanup, 310 .cleanup = tcf_nat_cleanup,
311 .lookup = tcf_hash_search,
312 .init = tcf_nat_init, 311 .init = tcf_nat_init,
313 .walk = tcf_generic_walker
314}; 312};
315 313
316MODULE_DESCRIPTION("Stateless NAT actions"); 314MODULE_DESCRIPTION("Stateless NAT actions");
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 7ed78c9e505c..03b67674169c 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -243,9 +243,7 @@ static struct tc_action_ops act_pedit_ops = {
243 .act = tcf_pedit, 243 .act = tcf_pedit,
244 .dump = tcf_pedit_dump, 244 .dump = tcf_pedit_dump,
245 .cleanup = tcf_pedit_cleanup, 245 .cleanup = tcf_pedit_cleanup,
246 .lookup = tcf_hash_search,
247 .init = tcf_pedit_init, 246 .init = tcf_pedit_init,
248 .walk = tcf_generic_walker
249}; 247};
250 248
251MODULE_AUTHOR("Jamal Hadi Salim(2002-4)"); 249MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 272d8e924cf6..16a62c36928a 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -407,7 +407,6 @@ static struct tc_action_ops act_police_ops = {
407 .act = tcf_act_police, 407 .act = tcf_act_police,
408 .dump = tcf_act_police_dump, 408 .dump = tcf_act_police_dump,
409 .cleanup = tcf_act_police_cleanup, 409 .cleanup = tcf_act_police_cleanup,
410 .lookup = tcf_hash_search,
411 .init = tcf_act_police_locate, 410 .init = tcf_act_police_locate,
412 .walk = tcf_act_police_walker 411 .walk = tcf_act_police_walker
413}; 412};
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 7725eb4ab756..31157d3e729c 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -201,7 +201,6 @@ static struct tc_action_ops act_simp_ops = {
201 .dump = tcf_simp_dump, 201 .dump = tcf_simp_dump,
202 .cleanup = tcf_simp_cleanup, 202 .cleanup = tcf_simp_cleanup,
203 .init = tcf_simp_init, 203 .init = tcf_simp_init,
204 .walk = tcf_generic_walker,
205}; 204};
206 205
207MODULE_AUTHOR("Jamal Hadi Salim(2005)"); 206MODULE_AUTHOR("Jamal Hadi Salim(2005)");
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index cb4221171f93..35ea643b4325 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -203,7 +203,6 @@ static struct tc_action_ops act_skbedit_ops = {
203 .dump = tcf_skbedit_dump, 203 .dump = tcf_skbedit_dump,
204 .cleanup = tcf_skbedit_cleanup, 204 .cleanup = tcf_skbedit_cleanup,
205 .init = tcf_skbedit_init, 205 .init = tcf_skbedit_init,
206 .walk = tcf_generic_walker,
207}; 206};
208 207
209MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>"); 208MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index fdc041c57853..95d843961907 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -88,7 +88,7 @@ struct fq_sched_data {
88 struct fq_flow internal; /* for non classified or high prio packets */ 88 struct fq_flow internal; /* for non classified or high prio packets */
89 u32 quantum; 89 u32 quantum;
90 u32 initial_quantum; 90 u32 initial_quantum;
91 u32 flow_default_rate;/* rate per flow : bytes per second */ 91 u32 flow_refill_delay;
92 u32 flow_max_rate; /* optional max rate per flow */ 92 u32 flow_max_rate; /* optional max rate per flow */
93 u32 flow_plimit; /* max packets per flow */ 93 u32 flow_plimit; /* max packets per flow */
94 struct rb_root *fq_root; 94 struct rb_root *fq_root;
@@ -115,6 +115,7 @@ static struct fq_flow detached, throttled;
115static void fq_flow_set_detached(struct fq_flow *f) 115static void fq_flow_set_detached(struct fq_flow *f)
116{ 116{
117 f->next = &detached; 117 f->next = &detached;
118 f->age = jiffies;
118} 119}
119 120
120static bool fq_flow_is_detached(const struct fq_flow *f) 121static bool fq_flow_is_detached(const struct fq_flow *f)
@@ -209,21 +210,15 @@ static void fq_gc(struct fq_sched_data *q,
209 } 210 }
210} 211}
211 212
212static const u8 prio2band[TC_PRIO_MAX + 1] = {
213 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
214};
215
216static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q) 213static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
217{ 214{
218 struct rb_node **p, *parent; 215 struct rb_node **p, *parent;
219 struct sock *sk = skb->sk; 216 struct sock *sk = skb->sk;
220 struct rb_root *root; 217 struct rb_root *root;
221 struct fq_flow *f; 218 struct fq_flow *f;
222 int band;
223 219
224 /* warning: no starvation prevention... */ 220 /* warning: no starvation prevention... */
225 band = prio2band[skb->priority & TC_PRIO_MAX]; 221 if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
226 if (unlikely(band == 0))
227 return &q->internal; 222 return &q->internal;
228 223
229 if (unlikely(!sk)) { 224 if (unlikely(!sk)) {
@@ -373,17 +368,20 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
373 } 368 }
374 369
375 f->qlen++; 370 f->qlen++;
376 flow_queue_add(f, skb);
377 if (skb_is_retransmit(skb)) 371 if (skb_is_retransmit(skb))
378 q->stat_tcp_retrans++; 372 q->stat_tcp_retrans++;
379 sch->qstats.backlog += qdisc_pkt_len(skb); 373 sch->qstats.backlog += qdisc_pkt_len(skb);
380 if (fq_flow_is_detached(f)) { 374 if (fq_flow_is_detached(f)) {
381 fq_flow_add_tail(&q->new_flows, f); 375 fq_flow_add_tail(&q->new_flows, f);
382 if (q->quantum > f->credit) 376 if (time_after(jiffies, f->age + q->flow_refill_delay))
383 f->credit = q->quantum; 377 f->credit = max_t(u32, f->credit, q->quantum);
384 q->inactive_flows--; 378 q->inactive_flows--;
385 qdisc_unthrottled(sch); 379 qdisc_unthrottled(sch);
386 } 380 }
381
382 /* Note: this overwrites f->age */
383 flow_queue_add(f, skb);
384
387 if (unlikely(f == &q->internal)) { 385 if (unlikely(f == &q->internal)) {
388 q->stat_internal_packets++; 386 q->stat_internal_packets++;
389 qdisc_unthrottled(sch); 387 qdisc_unthrottled(sch);
@@ -461,7 +459,6 @@ begin:
461 fq_flow_add_tail(&q->old_flows, f); 459 fq_flow_add_tail(&q->old_flows, f);
462 } else { 460 } else {
463 fq_flow_set_detached(f); 461 fq_flow_set_detached(f);
464 f->age = jiffies;
465 q->inactive_flows++; 462 q->inactive_flows++;
466 } 463 }
467 goto begin; 464 goto begin;
@@ -615,6 +612,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
615 [TCA_FQ_FLOW_DEFAULT_RATE] = { .type = NLA_U32 }, 612 [TCA_FQ_FLOW_DEFAULT_RATE] = { .type = NLA_U32 },
616 [TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 }, 613 [TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 },
617 [TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 }, 614 [TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
615 [TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 },
618}; 616};
619 617
620static int fq_change(struct Qdisc *sch, struct nlattr *opt) 618static int fq_change(struct Qdisc *sch, struct nlattr *opt)
@@ -656,7 +654,8 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
656 q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]); 654 q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
657 655
658 if (tb[TCA_FQ_FLOW_DEFAULT_RATE]) 656 if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
659 q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]); 657 pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
658 nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));
660 659
661 if (tb[TCA_FQ_FLOW_MAX_RATE]) 660 if (tb[TCA_FQ_FLOW_MAX_RATE])
662 q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]); 661 q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);
@@ -670,6 +669,12 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
670 err = -EINVAL; 669 err = -EINVAL;
671 } 670 }
672 671
672 if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
673 u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]) ;
674
675 q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
676 }
677
673 if (!err) 678 if (!err)
674 err = fq_resize(q, fq_log); 679 err = fq_resize(q, fq_log);
675 680
@@ -705,7 +710,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
705 q->flow_plimit = 100; 710 q->flow_plimit = 100;
706 q->quantum = 2 * psched_mtu(qdisc_dev(sch)); 711 q->quantum = 2 * psched_mtu(qdisc_dev(sch));
707 q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch)); 712 q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
708 q->flow_default_rate = 0; 713 q->flow_refill_delay = msecs_to_jiffies(40);
709 q->flow_max_rate = ~0U; 714 q->flow_max_rate = ~0U;
710 q->rate_enable = 1; 715 q->rate_enable = 1;
711 q->new_flows.first = NULL; 716 q->new_flows.first = NULL;
@@ -732,15 +737,16 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
732 if (opts == NULL) 737 if (opts == NULL)
733 goto nla_put_failure; 738 goto nla_put_failure;
734 739
735 /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore, 740 /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */
736 * do not bother giving its value 741
737 */
738 if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) || 742 if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
739 nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) || 743 nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
740 nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) || 744 nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
741 nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) || 745 nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
742 nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) || 746 nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
743 nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) || 747 nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
748 nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
749 jiffies_to_usecs(q->flow_refill_delay)) ||
744 nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log)) 750 nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
745 goto nla_put_failure; 751 goto nla_put_failure;
746 752
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 0e1e38b40025..717b2108f852 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1477,11 +1477,22 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1477 sch_tree_lock(sch); 1477 sch_tree_lock(sch);
1478 } 1478 }
1479 1479
1480 rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
1481
1482 ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
1483
1484 psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
1485 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
1486
1480 /* it used to be a nasty bug here, we have to check that node 1487 /* it used to be a nasty bug here, we have to check that node
1481 * is really leaf before changing cl->un.leaf ! 1488 * is really leaf before changing cl->un.leaf !
1482 */ 1489 */
1483 if (!cl->level) { 1490 if (!cl->level) {
1484 cl->quantum = hopt->rate.rate / q->rate2quantum; 1491 u64 quantum = cl->rate.rate_bytes_ps;
1492
1493 do_div(quantum, q->rate2quantum);
1494 cl->quantum = min_t(u64, quantum, INT_MAX);
1495
1485 if (!hopt->quantum && cl->quantum < 1000) { 1496 if (!hopt->quantum && cl->quantum < 1000) {
1486 pr_warning( 1497 pr_warning(
1487 "HTB: quantum of class %X is small. Consider r2q change.\n", 1498 "HTB: quantum of class %X is small. Consider r2q change.\n",
@@ -1500,13 +1511,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1500 cl->prio = TC_HTB_NUMPRIO - 1; 1511 cl->prio = TC_HTB_NUMPRIO - 1;
1501 } 1512 }
1502 1513
1503 rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
1504
1505 ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
1506
1507 psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
1508 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
1509
1510 cl->buffer = PSCHED_TICKS2NS(hopt->buffer); 1514 cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
1511 cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer); 1515 cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
1512 1516
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 75c94e59a3bd..bccd52b36e97 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -215,10 +215,10 @@ static bool loss_4state(struct netem_sched_data *q)
215 if (rnd < clg->a4) { 215 if (rnd < clg->a4) {
216 clg->state = 4; 216 clg->state = 4;
217 return true; 217 return true;
218 } else if (clg->a4 < rnd && rnd < clg->a1) { 218 } else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
219 clg->state = 3; 219 clg->state = 3;
220 return true; 220 return true;
221 } else if (clg->a1 < rnd) 221 } else if (clg->a1 + clg->a4 < rnd)
222 clg->state = 1; 222 clg->state = 1;
223 223
224 break; 224 break;
@@ -268,10 +268,11 @@ static bool loss_gilb_ell(struct netem_sched_data *q)
268 clg->state = 2; 268 clg->state = 2;
269 if (net_random() < clg->a4) 269 if (net_random() < clg->a4)
270 return true; 270 return true;
271 break;
271 case 2: 272 case 2:
272 if (net_random() < clg->a2) 273 if (net_random() < clg->a2)
273 clg->state = 1; 274 clg->state = 1;
274 if (clg->a3 > net_random()) 275 if (net_random() > clg->a3)
275 return true; 276 return true;
276 } 277 }
277 278
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 68f98595819c..887e672f9d7d 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -21,6 +21,7 @@
21#include <net/netlink.h> 21#include <net/netlink.h>
22#include <net/sch_generic.h> 22#include <net/sch_generic.h>
23#include <net/pkt_sched.h> 23#include <net/pkt_sched.h>
24#include <net/tcp.h>
24 25
25 26
26/* Simple Token Bucket Filter. 27/* Simple Token Bucket Filter.
@@ -117,6 +118,48 @@ struct tbf_sched_data {
117}; 118};
118 119
119 120
121/* Time to Length, convert time in ns to length in bytes
122 * to determinate how many bytes can be sent in given time.
123 */
124static u64 psched_ns_t2l(const struct psched_ratecfg *r,
125 u64 time_in_ns)
126{
127 /* The formula is :
128 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
129 */
130 u64 len = time_in_ns * r->rate_bytes_ps;
131
132 do_div(len, NSEC_PER_SEC);
133
134 if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
135 do_div(len, 53);
136 len = len * 48;
137 }
138
139 if (len > r->overhead)
140 len -= r->overhead;
141 else
142 len = 0;
143
144 return len;
145}
146
147/*
148 * Return length of individual segments of a gso packet,
149 * including all headers (MAC, IP, TCP/UDP)
150 */
151static unsigned int skb_gso_seglen(const struct sk_buff *skb)
152{
153 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
154 const struct skb_shared_info *shinfo = skb_shinfo(skb);
155
156 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
157 hdr_len += tcp_hdrlen(skb);
158 else
159 hdr_len += sizeof(struct udphdr);
160 return hdr_len + shinfo->gso_size;
161}
162
120/* GSO packet is too big, segment it so that tbf can transmit 163/* GSO packet is too big, segment it so that tbf can transmit
121 * each segment in time 164 * each segment in time
122 */ 165 */
@@ -136,12 +179,8 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
136 while (segs) { 179 while (segs) {
137 nskb = segs->next; 180 nskb = segs->next;
138 segs->next = NULL; 181 segs->next = NULL;
139 if (likely(segs->len <= q->max_size)) { 182 qdisc_skb_cb(segs)->pkt_len = segs->len;
140 qdisc_skb_cb(segs)->pkt_len = segs->len; 183 ret = qdisc_enqueue(segs, q->qdisc);
141 ret = qdisc_enqueue(segs, q->qdisc);
142 } else {
143 ret = qdisc_reshape_fail(skb, sch);
144 }
145 if (ret != NET_XMIT_SUCCESS) { 184 if (ret != NET_XMIT_SUCCESS) {
146 if (net_xmit_drop_count(ret)) 185 if (net_xmit_drop_count(ret))
147 sch->qstats.drops++; 186 sch->qstats.drops++;
@@ -163,7 +202,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
163 int ret; 202 int ret;
164 203
165 if (qdisc_pkt_len(skb) > q->max_size) { 204 if (qdisc_pkt_len(skb) > q->max_size) {
166 if (skb_is_gso(skb)) 205 if (skb_is_gso(skb) && skb_gso_seglen(skb) <= q->max_size)
167 return tbf_segment(skb, sch); 206 return tbf_segment(skb, sch);
168 return qdisc_reshape_fail(skb, sch); 207 return qdisc_reshape_fail(skb, sch);
169 } 208 }
@@ -276,10 +315,11 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
276 struct tbf_sched_data *q = qdisc_priv(sch); 315 struct tbf_sched_data *q = qdisc_priv(sch);
277 struct nlattr *tb[TCA_TBF_MAX + 1]; 316 struct nlattr *tb[TCA_TBF_MAX + 1];
278 struct tc_tbf_qopt *qopt; 317 struct tc_tbf_qopt *qopt;
279 struct qdisc_rate_table *rtab = NULL;
280 struct qdisc_rate_table *ptab = NULL;
281 struct Qdisc *child = NULL; 318 struct Qdisc *child = NULL;
282 int max_size, n; 319 struct psched_ratecfg rate;
320 struct psched_ratecfg peak;
321 u64 max_size;
322 s64 buffer, mtu;
283 u64 rate64 = 0, prate64 = 0; 323 u64 rate64 = 0, prate64 = 0;
284 324
285 err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy); 325 err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy);
@@ -291,33 +331,13 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
291 goto done; 331 goto done;
292 332
293 qopt = nla_data(tb[TCA_TBF_PARMS]); 333 qopt = nla_data(tb[TCA_TBF_PARMS]);
294 rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]); 334 if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
295 if (rtab == NULL) 335 qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
296 goto done; 336 tb[TCA_TBF_RTAB]));
297 337
298 if (qopt->peakrate.rate) { 338 if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
299 if (qopt->peakrate.rate > qopt->rate.rate) 339 qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
300 ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]); 340 tb[TCA_TBF_PTAB]));
301 if (ptab == NULL)
302 goto done;
303 }
304
305 for (n = 0; n < 256; n++)
306 if (rtab->data[n] > qopt->buffer)
307 break;
308 max_size = (n << qopt->rate.cell_log) - 1;
309 if (ptab) {
310 int size;
311
312 for (n = 0; n < 256; n++)
313 if (ptab->data[n] > qopt->mtu)
314 break;
315 size = (n << qopt->peakrate.cell_log) - 1;
316 if (size < max_size)
317 max_size = size;
318 }
319 if (max_size < 0)
320 goto done;
321 341
322 if (q->qdisc != &noop_qdisc) { 342 if (q->qdisc != &noop_qdisc) {
323 err = fifo_set_limit(q->qdisc, qopt->limit); 343 err = fifo_set_limit(q->qdisc, qopt->limit);
@@ -331,6 +351,39 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
331 } 351 }
332 } 352 }
333 353
354 buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
355 mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);
356
357 if (tb[TCA_TBF_RATE64])
358 rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
359 psched_ratecfg_precompute(&rate, &qopt->rate, rate64);
360
361 max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
362
363 if (qopt->peakrate.rate) {
364 if (tb[TCA_TBF_PRATE64])
365 prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
366 psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
367 if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
368 pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equals to rate %llu !\n",
369 peak.rate_bytes_ps, rate.rate_bytes_ps);
370 err = -EINVAL;
371 goto done;
372 }
373
374 max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
375 }
376
377 if (max_size < psched_mtu(qdisc_dev(sch)))
378 pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
379 max_size, qdisc_dev(sch)->name,
380 psched_mtu(qdisc_dev(sch)));
381
382 if (!max_size) {
383 err = -EINVAL;
384 goto done;
385 }
386
334 sch_tree_lock(sch); 387 sch_tree_lock(sch);
335 if (child) { 388 if (child) {
336 qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen); 389 qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
@@ -344,13 +397,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
344 q->tokens = q->buffer; 397 q->tokens = q->buffer;
345 q->ptokens = q->mtu; 398 q->ptokens = q->mtu;
346 399
347 if (tb[TCA_TBF_RATE64]) 400 memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
348 rate64 = nla_get_u64(tb[TCA_TBF_RATE64]); 401 if (qopt->peakrate.rate) {
349 psched_ratecfg_precompute(&q->rate, &rtab->rate, rate64); 402 memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
350 if (ptab) {
351 if (tb[TCA_TBF_PRATE64])
352 prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
353 psched_ratecfg_precompute(&q->peak, &ptab->rate, prate64);
354 q->peak_present = true; 403 q->peak_present = true;
355 } else { 404 } else {
356 q->peak_present = false; 405 q->peak_present = false;
@@ -359,10 +408,6 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
359 sch_tree_unlock(sch); 408 sch_tree_unlock(sch);
360 err = 0; 409 err = 0;
361done: 410done:
362 if (rtab)
363 qdisc_put_rtab(rtab);
364 if (ptab)
365 qdisc_put_rtab(ptab);
366 return err; 411 return err;
367} 412}
368 413
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index c9b91cb1cb0d..31ed008c8e13 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -154,8 +154,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
154 154
155 asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0; 155 asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
156 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay; 156 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
157 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = 157 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
158 min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ;
159 158
160 /* Initializes the timers */ 159 /* Initializes the timers */
161 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) 160 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
@@ -291,8 +290,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
291 asoc->peer.ipv6_address = 1; 290 asoc->peer.ipv6_address = 1;
292 INIT_LIST_HEAD(&asoc->asocs); 291 INIT_LIST_HEAD(&asoc->asocs);
293 292
294 asoc->autoclose = sp->autoclose;
295
296 asoc->default_stream = sp->default_stream; 293 asoc->default_stream = sp->default_stream;
297 asoc->default_ppid = sp->default_ppid; 294 asoc->default_ppid = sp->default_ppid;
298 asoc->default_flags = sp->default_flags; 295 asoc->default_flags = sp->default_flags;
@@ -907,8 +904,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
907 if (!first || t->last_time_heard > first->last_time_heard) { 904 if (!first || t->last_time_heard > first->last_time_heard) {
908 second = first; 905 second = first;
909 first = t; 906 first = t;
910 } 907 } else if (!second ||
911 if (!second || t->last_time_heard > second->last_time_heard) 908 t->last_time_heard > second->last_time_heard)
912 second = t; 909 second = t;
913 } 910 }
914 911
@@ -929,6 +926,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
929 first = asoc->peer.primary_path; 926 first = asoc->peer.primary_path;
930 } 927 }
931 928
929 if (!second)
930 second = first;
932 /* If we failed to find a usable transport, just camp on the 931 /* If we failed to find a usable transport, just camp on the
933 * primary, even if it is inactive. 932 * primary, even if it is inactive.
934 */ 933 */
diff --git a/net/sctp/output.c b/net/sctp/output.c
index e650978daf27..0fb140f8f088 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -474,10 +474,11 @@ int sctp_packet_transmit(struct sctp_packet *packet)
474 * for a given destination transport address. 474 * for a given destination transport address.
475 */ 475 */
476 476
477 if (!tp->rto_pending) { 477 if (!chunk->resent && !tp->rto_pending) {
478 chunk->rtt_in_progress = 1; 478 chunk->rtt_in_progress = 1;
479 tp->rto_pending = 1; 479 tp->rto_pending = 1;
480 } 480 }
481
481 has_data = 1; 482 has_data = 1;
482 } 483 }
483 484
@@ -580,7 +581,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
580 unsigned long timeout; 581 unsigned long timeout;
581 582
582 /* Restart the AUTOCLOSE timer when sending data. */ 583 /* Restart the AUTOCLOSE timer when sending data. */
583 if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) { 584 if (sctp_state(asoc, ESTABLISHED) &&
585 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
584 timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; 586 timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
585 timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; 587 timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
586 588
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 94df75877869..f51ba985a36e 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -446,6 +446,8 @@ void sctp_retransmit_mark(struct sctp_outq *q,
446 transport->rto_pending = 0; 446 transport->rto_pending = 0;
447 } 447 }
448 448
449 chunk->resent = 1;
450
449 /* Move the chunk to the retransmit queue. The chunks 451 /* Move the chunk to the retransmit queue. The chunks
450 * on the retransmit queue are always kept in order. 452 * on the retransmit queue are always kept in order.
451 */ 453 */
@@ -1375,6 +1377,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1375 * instance). 1377 * instance).
1376 */ 1378 */
1377 if (!tchunk->tsn_gap_acked && 1379 if (!tchunk->tsn_gap_acked &&
1380 !tchunk->resent &&
1378 tchunk->rtt_in_progress) { 1381 tchunk->rtt_in_progress) {
1379 tchunk->rtt_in_progress = 0; 1382 tchunk->rtt_in_progress = 0;
1380 rtt = jiffies - tchunk->sent_at; 1383 rtt = jiffies - tchunk->sent_at;
@@ -1391,7 +1394,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1391 */ 1394 */
1392 if (!tchunk->tsn_gap_acked) { 1395 if (!tchunk->tsn_gap_acked) {
1393 tchunk->tsn_gap_acked = 1; 1396 tchunk->tsn_gap_acked = 1;
1394 *highest_new_tsn_in_sack = tsn; 1397 if (TSN_lt(*highest_new_tsn_in_sack, tsn))
1398 *highest_new_tsn_in_sack = tsn;
1395 bytes_acked += sctp_data_size(tchunk); 1399 bytes_acked += sctp_data_size(tchunk);
1396 if (!tchunk->transport) 1400 if (!tchunk->transport)
1397 migrate_bytes += sctp_data_size(tchunk); 1401 migrate_bytes += sctp_data_size(tchunk);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index dfe3f36ff2aa..a26065be7289 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -820,7 +820,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
820 SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS); 820 SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS);
821 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); 821 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
822 822
823 if (new_asoc->autoclose) 823 if (new_asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
824 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, 824 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
825 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); 825 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
826 826
@@ -908,7 +908,7 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(struct net *net,
908 SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB); 908 SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
909 SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS); 909 SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS);
910 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); 910 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
911 if (asoc->autoclose) 911 if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
912 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, 912 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
913 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); 913 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
914 914
@@ -2970,7 +2970,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net,
2970 if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM) 2970 if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM)
2971 force = SCTP_FORCE(); 2971 force = SCTP_FORCE();
2972 2972
2973 if (asoc->autoclose) { 2973 if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
2974 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, 2974 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
2975 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); 2975 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
2976 } 2976 }
@@ -3878,7 +3878,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
3878 SCTP_CHUNK(chunk)); 3878 SCTP_CHUNK(chunk));
3879 3879
3880 /* Count this as receiving DATA. */ 3880 /* Count this as receiving DATA. */
3881 if (asoc->autoclose) { 3881 if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
3882 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, 3882 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
3883 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); 3883 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
3884 } 3884 }
@@ -5267,7 +5267,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
5267 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, 5267 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
5268 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); 5268 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
5269 5269
5270 if (asoc->autoclose) 5270 if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
5271 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 5271 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
5272 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); 5272 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
5273 5273
@@ -5346,7 +5346,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(
5346 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, 5346 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
5347 SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); 5347 SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
5348 5348
5349 if (asoc->autoclose) 5349 if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
5350 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 5350 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
5351 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); 5351 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
5352 5352
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 72046b9729a8..42b709c95cf3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2196,6 +2196,7 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
2196 unsigned int optlen) 2196 unsigned int optlen)
2197{ 2197{
2198 struct sctp_sock *sp = sctp_sk(sk); 2198 struct sctp_sock *sp = sctp_sk(sk);
2199 struct net *net = sock_net(sk);
2199 2200
2200 /* Applicable to UDP-style socket only */ 2201 /* Applicable to UDP-style socket only */
2201 if (sctp_style(sk, TCP)) 2202 if (sctp_style(sk, TCP))
@@ -2205,6 +2206,9 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
2205 if (copy_from_user(&sp->autoclose, optval, optlen)) 2206 if (copy_from_user(&sp->autoclose, optval, optlen))
2206 return -EFAULT; 2207 return -EFAULT;
2207 2208
2209 if (sp->autoclose > net->sctp.max_autoclose)
2210 sp->autoclose = net->sctp.max_autoclose;
2211
2208 return 0; 2212 return 0;
2209} 2213}
2210 2214
@@ -2811,6 +2815,8 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigne
2811{ 2815{
2812 struct sctp_rtoinfo rtoinfo; 2816 struct sctp_rtoinfo rtoinfo;
2813 struct sctp_association *asoc; 2817 struct sctp_association *asoc;
2818 unsigned long rto_min, rto_max;
2819 struct sctp_sock *sp = sctp_sk(sk);
2814 2820
2815 if (optlen != sizeof (struct sctp_rtoinfo)) 2821 if (optlen != sizeof (struct sctp_rtoinfo))
2816 return -EINVAL; 2822 return -EINVAL;
@@ -2824,26 +2830,36 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigne
2824 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 2830 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
2825 return -EINVAL; 2831 return -EINVAL;
2826 2832
2833 rto_max = rtoinfo.srto_max;
2834 rto_min = rtoinfo.srto_min;
2835
2836 if (rto_max)
2837 rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max;
2838 else
2839 rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max;
2840
2841 if (rto_min)
2842 rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min;
2843 else
2844 rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min;
2845
2846 if (rto_min > rto_max)
2847 return -EINVAL;
2848
2827 if (asoc) { 2849 if (asoc) {
2828 if (rtoinfo.srto_initial != 0) 2850 if (rtoinfo.srto_initial != 0)
2829 asoc->rto_initial = 2851 asoc->rto_initial =
2830 msecs_to_jiffies(rtoinfo.srto_initial); 2852 msecs_to_jiffies(rtoinfo.srto_initial);
2831 if (rtoinfo.srto_max != 0) 2853 asoc->rto_max = rto_max;
2832 asoc->rto_max = msecs_to_jiffies(rtoinfo.srto_max); 2854 asoc->rto_min = rto_min;
2833 if (rtoinfo.srto_min != 0)
2834 asoc->rto_min = msecs_to_jiffies(rtoinfo.srto_min);
2835 } else { 2855 } else {
2836 /* If there is no association or the association-id = 0 2856 /* If there is no association or the association-id = 0
2837 * set the values to the endpoint. 2857 * set the values to the endpoint.
2838 */ 2858 */
2839 struct sctp_sock *sp = sctp_sk(sk);
2840
2841 if (rtoinfo.srto_initial != 0) 2859 if (rtoinfo.srto_initial != 0)
2842 sp->rtoinfo.srto_initial = rtoinfo.srto_initial; 2860 sp->rtoinfo.srto_initial = rtoinfo.srto_initial;
2843 if (rtoinfo.srto_max != 0) 2861 sp->rtoinfo.srto_max = rto_max;
2844 sp->rtoinfo.srto_max = rtoinfo.srto_max; 2862 sp->rtoinfo.srto_min = rto_min;
2845 if (rtoinfo.srto_min != 0)
2846 sp->rtoinfo.srto_min = rtoinfo.srto_min;
2847 } 2863 }
2848 2864
2849 return 0; 2865 return 0;
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 6b36561a1b3b..b0565afb61c7 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -56,11 +56,16 @@ extern long sysctl_sctp_mem[3];
56extern int sysctl_sctp_rmem[3]; 56extern int sysctl_sctp_rmem[3];
57extern int sysctl_sctp_wmem[3]; 57extern int sysctl_sctp_wmem[3];
58 58
59static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, 59static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
60 int write, 60 void __user *buffer, size_t *lenp,
61 loff_t *ppos);
62static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
63 void __user *buffer, size_t *lenp,
64 loff_t *ppos);
65static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
61 void __user *buffer, size_t *lenp, 66 void __user *buffer, size_t *lenp,
62
63 loff_t *ppos); 67 loff_t *ppos);
68
64static struct ctl_table sctp_table[] = { 69static struct ctl_table sctp_table[] = {
65 { 70 {
66 .procname = "sctp_mem", 71 .procname = "sctp_mem",
@@ -102,17 +107,17 @@ static struct ctl_table sctp_net_table[] = {
102 .data = &init_net.sctp.rto_min, 107 .data = &init_net.sctp.rto_min,
103 .maxlen = sizeof(unsigned int), 108 .maxlen = sizeof(unsigned int),
104 .mode = 0644, 109 .mode = 0644,
105 .proc_handler = proc_dointvec_minmax, 110 .proc_handler = proc_sctp_do_rto_min,
106 .extra1 = &one, 111 .extra1 = &one,
107 .extra2 = &timer_max 112 .extra2 = &init_net.sctp.rto_max
108 }, 113 },
109 { 114 {
110 .procname = "rto_max", 115 .procname = "rto_max",
111 .data = &init_net.sctp.rto_max, 116 .data = &init_net.sctp.rto_max,
112 .maxlen = sizeof(unsigned int), 117 .maxlen = sizeof(unsigned int),
113 .mode = 0644, 118 .mode = 0644,
114 .proc_handler = proc_dointvec_minmax, 119 .proc_handler = proc_sctp_do_rto_max,
115 .extra1 = &one, 120 .extra1 = &init_net.sctp.rto_min,
116 .extra2 = &timer_max 121 .extra2 = &timer_max
117 }, 122 },
118 { 123 {
@@ -294,8 +299,7 @@ static struct ctl_table sctp_net_table[] = {
294 { /* sentinel */ } 299 { /* sentinel */ }
295}; 300};
296 301
297static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, 302static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
298 int write,
299 void __user *buffer, size_t *lenp, 303 void __user *buffer, size_t *lenp,
300 loff_t *ppos) 304 loff_t *ppos)
301{ 305{
@@ -342,6 +346,60 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl,
342 return ret; 346 return ret;
343} 347}
344 348
349static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
350 void __user *buffer, size_t *lenp,
351 loff_t *ppos)
352{
353 struct net *net = current->nsproxy->net_ns;
354 int new_value;
355 struct ctl_table tbl;
356 unsigned int min = *(unsigned int *) ctl->extra1;
357 unsigned int max = *(unsigned int *) ctl->extra2;
358 int ret;
359
360 memset(&tbl, 0, sizeof(struct ctl_table));
361 tbl.maxlen = sizeof(unsigned int);
362
363 if (write)
364 tbl.data = &new_value;
365 else
366 tbl.data = &net->sctp.rto_min;
367 ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
368 if (write) {
369 if (ret || new_value > max || new_value < min)
370 return -EINVAL;
371 net->sctp.rto_min = new_value;
372 }
373 return ret;
374}
375
376static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
377 void __user *buffer, size_t *lenp,
378 loff_t *ppos)
379{
380 struct net *net = current->nsproxy->net_ns;
381 int new_value;
382 struct ctl_table tbl;
383 unsigned int min = *(unsigned int *) ctl->extra1;
384 unsigned int max = *(unsigned int *) ctl->extra2;
385 int ret;
386
387 memset(&tbl, 0, sizeof(struct ctl_table));
388 tbl.maxlen = sizeof(unsigned int);
389
390 if (write)
391 tbl.data = &new_value;
392 else
393 tbl.data = &net->sctp.rto_max;
394 ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
395 if (write) {
396 if (ret || new_value > max || new_value < min)
397 return -EINVAL;
398 net->sctp.rto_max = new_value;
399 }
400 return ret;
401}
402
345int sctp_sysctl_net_register(struct net *net) 403int sctp_sysctl_net_register(struct net *net)
346{ 404{
347 struct ctl_table *table; 405 struct ctl_table *table;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index e332efb124cc..efc46ffed1fd 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -573,7 +573,7 @@ void sctp_transport_burst_limited(struct sctp_transport *t)
573 u32 old_cwnd = t->cwnd; 573 u32 old_cwnd = t->cwnd;
574 u32 max_burst_bytes; 574 u32 max_burst_bytes;
575 575
576 if (t->burst_limited) 576 if (t->burst_limited || asoc->max_burst == 0)
577 return; 577 return;
578 578
579 max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu); 579 max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
diff --git a/net/socket.c b/net/socket.c
index c226aceee65b..e83c416708af 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -221,12 +221,13 @@ static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen,
221 int err; 221 int err;
222 int len; 222 int len;
223 223
224 BUG_ON(klen > sizeof(struct sockaddr_storage));
224 err = get_user(len, ulen); 225 err = get_user(len, ulen);
225 if (err) 226 if (err)
226 return err; 227 return err;
227 if (len > klen) 228 if (len > klen)
228 len = klen; 229 len = klen;
229 if (len < 0 || len > sizeof(struct sockaddr_storage)) 230 if (len < 0)
230 return -EINVAL; 231 return -EINVAL;
231 if (len) { 232 if (len) {
232 if (audit_sockaddr(klen, kaddr)) 233 if (audit_sockaddr(klen, kaddr))
@@ -1840,8 +1841,10 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
1840 msg.msg_iov = &iov; 1841 msg.msg_iov = &iov;
1841 iov.iov_len = size; 1842 iov.iov_len = size;
1842 iov.iov_base = ubuf; 1843 iov.iov_base = ubuf;
1843 msg.msg_name = (struct sockaddr *)&address; 1844 /* Save some cycles and don't copy the address if not needed */
1844 msg.msg_namelen = sizeof(address); 1845 msg.msg_name = addr ? (struct sockaddr *)&address : NULL;
1846 /* We assume all kernel code knows the size of sockaddr_storage */
1847 msg.msg_namelen = 0;
1845 if (sock->file->f_flags & O_NONBLOCK) 1848 if (sock->file->f_flags & O_NONBLOCK)
1846 flags |= MSG_DONTWAIT; 1849 flags |= MSG_DONTWAIT;
1847 err = sock_recvmsg(sock, &msg, size, flags); 1850 err = sock_recvmsg(sock, &msg, size, flags);
@@ -1970,7 +1973,7 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
1970 if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) 1973 if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
1971 return -EFAULT; 1974 return -EFAULT;
1972 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) 1975 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
1973 return -EINVAL; 1976 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
1974 return 0; 1977 return 0;
1975} 1978}
1976 1979
@@ -2221,16 +2224,14 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
2221 goto out; 2224 goto out;
2222 } 2225 }
2223 2226
2224 /* 2227 /* Save the user-mode address (verify_iovec will change the
2225 * Save the user-mode address (verify_iovec will change the 2228 * kernel msghdr to use the kernel address space)
2226 * kernel msghdr to use the kernel address space)
2227 */ 2229 */
2228
2229 uaddr = (__force void __user *)msg_sys->msg_name; 2230 uaddr = (__force void __user *)msg_sys->msg_name;
2230 uaddr_len = COMPAT_NAMELEN(msg); 2231 uaddr_len = COMPAT_NAMELEN(msg);
2231 if (MSG_CMSG_COMPAT & flags) { 2232 if (MSG_CMSG_COMPAT & flags)
2232 err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE); 2233 err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
2233 } else 2234 else
2234 err = verify_iovec(msg_sys, iov, &addr, VERIFY_WRITE); 2235 err = verify_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
2235 if (err < 0) 2236 if (err < 0)
2236 goto out_freeiov; 2237 goto out_freeiov;
@@ -2239,6 +2240,9 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
2239 cmsg_ptr = (unsigned long)msg_sys->msg_control; 2240 cmsg_ptr = (unsigned long)msg_sys->msg_control;
2240 msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); 2241 msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
2241 2242
2243 /* We assume all kernel code knows the size of sockaddr_storage */
2244 msg_sys->msg_namelen = 0;
2245
2242 if (sock->file->f_flags & O_NONBLOCK) 2246 if (sock->file->f_flags & O_NONBLOCK)
2243 flags |= MSG_DONTWAIT; 2247 flags |= MSG_DONTWAIT;
2244 err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, 2248 err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys,
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 97912b40c254..42fdfc634e56 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1517,7 +1517,7 @@ out:
1517static int 1517static int
1518gss_refresh_null(struct rpc_task *task) 1518gss_refresh_null(struct rpc_task *task)
1519{ 1519{
1520 return -EACCES; 1520 return 0;
1521} 1521}
1522 1522
1523static __be32 * 1523static __be32 *
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index d0d14a04dce1..bf04b30a788a 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -471,15 +471,6 @@ struct rpc_filelist {
471 umode_t mode; 471 umode_t mode;
472}; 472};
473 473
474static int rpc_delete_dentry(const struct dentry *dentry)
475{
476 return 1;
477}
478
479static const struct dentry_operations rpc_dentry_operations = {
480 .d_delete = rpc_delete_dentry,
481};
482
483static struct inode * 474static struct inode *
484rpc_get_inode(struct super_block *sb, umode_t mode) 475rpc_get_inode(struct super_block *sb, umode_t mode)
485{ 476{
@@ -1266,7 +1257,7 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
1266 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 1257 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
1267 sb->s_magic = RPCAUTH_GSSMAGIC; 1258 sb->s_magic = RPCAUTH_GSSMAGIC;
1268 sb->s_op = &s_ops; 1259 sb->s_op = &s_ops;
1269 sb->s_d_op = &rpc_dentry_operations; 1260 sb->s_d_op = &simple_dentry_operations;
1270 sb->s_time_gran = 1; 1261 sb->s_time_gran = 1;
1271 1262
1272 inode = rpc_get_inode(sb, S_IFDIR | S_IRUGO | S_IXUGO); 1263 inode = rpc_get_inode(sb, S_IFDIR | S_IRUGO | S_IXUGO);
diff --git a/net/tipc/core.c b/net/tipc/core.c
index fd4eeeaa972a..c6d3f75a9e1b 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -113,7 +113,6 @@ err:
113static void tipc_core_stop(void) 113static void tipc_core_stop(void)
114{ 114{
115 tipc_netlink_stop(); 115 tipc_netlink_stop();
116 tipc_handler_stop();
117 tipc_cfg_stop(); 116 tipc_cfg_stop();
118 tipc_subscr_stop(); 117 tipc_subscr_stop();
119 tipc_nametbl_stop(); 118 tipc_nametbl_stop();
@@ -146,9 +145,10 @@ static int tipc_core_start(void)
146 res = tipc_subscr_start(); 145 res = tipc_subscr_start();
147 if (!res) 146 if (!res)
148 res = tipc_cfg_init(); 147 res = tipc_cfg_init();
149 if (res) 148 if (res) {
149 tipc_handler_stop();
150 tipc_core_stop(); 150 tipc_core_stop();
151 151 }
152 return res; 152 return res;
153} 153}
154 154
@@ -178,6 +178,7 @@ static int __init tipc_init(void)
178 178
179static void __exit tipc_exit(void) 179static void __exit tipc_exit(void)
180{ 180{
181 tipc_handler_stop();
181 tipc_core_stop_net(); 182 tipc_core_stop_net();
182 tipc_core_stop(); 183 tipc_core_stop();
183 pr_info("Deactivated\n"); 184 pr_info("Deactivated\n");
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index b36f0fcd9bdf..e4bc8a296744 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -56,12 +56,13 @@ unsigned int tipc_k_signal(Handler routine, unsigned long argument)
56{ 56{
57 struct queue_item *item; 57 struct queue_item *item;
58 58
59 spin_lock_bh(&qitem_lock);
59 if (!handler_enabled) { 60 if (!handler_enabled) {
60 pr_err("Signal request ignored by handler\n"); 61 pr_err("Signal request ignored by handler\n");
62 spin_unlock_bh(&qitem_lock);
61 return -ENOPROTOOPT; 63 return -ENOPROTOOPT;
62 } 64 }
63 65
64 spin_lock_bh(&qitem_lock);
65 item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC); 66 item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
66 if (!item) { 67 if (!item) {
67 pr_err("Signal queue out of memory\n"); 68 pr_err("Signal queue out of memory\n");
@@ -112,10 +113,14 @@ void tipc_handler_stop(void)
112 struct list_head *l, *n; 113 struct list_head *l, *n;
113 struct queue_item *item; 114 struct queue_item *item;
114 115
115 if (!handler_enabled) 116 spin_lock_bh(&qitem_lock);
117 if (!handler_enabled) {
118 spin_unlock_bh(&qitem_lock);
116 return; 119 return;
117 120 }
118 handler_enabled = 0; 121 handler_enabled = 0;
122 spin_unlock_bh(&qitem_lock);
123
119 tasklet_kill(&tipc_tasklet); 124 tasklet_kill(&tipc_tasklet);
120 125
121 spin_lock_bh(&qitem_lock); 126 spin_lock_bh(&qitem_lock);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index cf465d66ccde..69cd9bf3f561 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -2358,7 +2358,8 @@ int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail,
2358 *head = frag; 2358 *head = frag;
2359 skb_frag_list_init(*head); 2359 skb_frag_list_init(*head);
2360 return 0; 2360 return 0;
2361 } else if (skb_try_coalesce(*head, frag, &headstolen, &delta)) { 2361 } else if (*head &&
2362 skb_try_coalesce(*head, frag, &headstolen, &delta)) {
2362 kfree_skb_partial(frag, headstolen); 2363 kfree_skb_partial(frag, headstolen);
2363 } else { 2364 } else {
2364 if (!*head) 2365 if (!*head)
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 8bcd4985d0fb..9f72a6376362 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -76,9 +76,11 @@ static struct genl_family tipc_genl_family = {
76 .maxattr = 0, 76 .maxattr = 0,
77}; 77};
78 78
79static struct genl_ops tipc_genl_ops = { 79static struct genl_ops tipc_genl_ops[] = {
80 .cmd = TIPC_GENL_CMD, 80 {
81 .doit = handle_cmd, 81 .cmd = TIPC_GENL_CMD,
82 .doit = handle_cmd,
83 },
82}; 84};
83 85
84static int tipc_genl_family_registered; 86static int tipc_genl_family_registered;
@@ -87,8 +89,7 @@ int tipc_netlink_start(void)
87{ 89{
88 int res; 90 int res;
89 91
90 res = genl_register_family_with_ops(&tipc_genl_family, 92 res = genl_register_family_with_ops(&tipc_genl_family, tipc_genl_ops);
91 &tipc_genl_ops, 1);
92 if (res) { 93 if (res) {
93 pr_err("Failed to register netlink interface\n"); 94 pr_err("Failed to register netlink interface\n");
94 return res; 95 return res;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3906527259d1..3b61851bb927 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -980,9 +980,6 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
980 goto exit; 980 goto exit;
981 } 981 }
982 982
983 /* will be updated in set_orig_addr() if needed */
984 m->msg_namelen = 0;
985
986 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 983 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
987restart: 984restart:
988 985
@@ -1091,9 +1088,6 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
1091 goto exit; 1088 goto exit;
1092 } 1089 }
1093 1090
1094 /* will be updated in set_orig_addr() if needed */
1095 m->msg_namelen = 0;
1096
1097 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); 1091 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
1098 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1092 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1099 1093
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c1f403bed683..a0ca162e5bd5 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -530,13 +530,17 @@ static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
530static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *, 530static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
531 struct msghdr *, size_t, int); 531 struct msghdr *, size_t, int);
532 532
533static void unix_set_peek_off(struct sock *sk, int val) 533static int unix_set_peek_off(struct sock *sk, int val)
534{ 534{
535 struct unix_sock *u = unix_sk(sk); 535 struct unix_sock *u = unix_sk(sk);
536 536
537 mutex_lock(&u->readlock); 537 if (mutex_lock_interruptible(&u->readlock))
538 return -EINTR;
539
538 sk->sk_peek_off = val; 540 sk->sk_peek_off = val;
539 mutex_unlock(&u->readlock); 541 mutex_unlock(&u->readlock);
542
543 return 0;
540} 544}
541 545
542 546
@@ -1754,7 +1758,6 @@ static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1754{ 1758{
1755 struct unix_sock *u = unix_sk(sk); 1759 struct unix_sock *u = unix_sk(sk);
1756 1760
1757 msg->msg_namelen = 0;
1758 if (u->addr) { 1761 if (u->addr) {
1759 msg->msg_namelen = u->addr->len; 1762 msg->msg_namelen = u->addr->len;
1760 memcpy(msg->msg_name, u->addr->name, u->addr->len); 1763 memcpy(msg->msg_name, u->addr->name, u->addr->len);
@@ -1778,8 +1781,6 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1778 if (flags&MSG_OOB) 1781 if (flags&MSG_OOB)
1779 goto out; 1782 goto out;
1780 1783
1781 msg->msg_namelen = 0;
1782
1783 err = mutex_lock_interruptible(&u->readlock); 1784 err = mutex_lock_interruptible(&u->readlock);
1784 if (err) { 1785 if (err) {
1785 err = sock_intr_errno(sock_rcvtimeo(sk, noblock)); 1786 err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
@@ -1924,8 +1925,6 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1924 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size); 1925 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1925 timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT); 1926 timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
1926 1927
1927 msg->msg_namelen = 0;
1928
1929 /* Lock the socket to prevent queue disordering 1928 /* Lock the socket to prevent queue disordering
1930 * while sleeps in memcpy_tomsg 1929 * while sleeps in memcpy_tomsg
1931 */ 1930 */
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 545c08b8a1d4..5adfd94c5b85 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1662,8 +1662,6 @@ vsock_stream_recvmsg(struct kiocb *kiocb,
1662 vsk = vsock_sk(sk); 1662 vsk = vsock_sk(sk);
1663 err = 0; 1663 err = 0;
1664 1664
1665 msg->msg_namelen = 0;
1666
1667 lock_sock(sk); 1665 lock_sock(sk);
1668 1666
1669 if (sk->sk_state != SS_CONNECTED) { 1667 if (sk->sk_state != SS_CONNECTED) {
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 9d6986634e0b..687360da62d9 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1746,8 +1746,6 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
1746 if (flags & MSG_OOB || flags & MSG_ERRQUEUE) 1746 if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
1747 return -EOPNOTSUPP; 1747 return -EOPNOTSUPP;
1748 1748
1749 msg->msg_namelen = 0;
1750
1751 /* Retrieve the head sk_buff from the socket's receive queue. */ 1749 /* Retrieve the head sk_buff from the socket's receive queue. */
1752 err = 0; 1750 err = 0;
1753 skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); 1751 skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
index 0694d62e4dbc..c278b3356f75 100644
--- a/net/wimax/op-msg.c
+++ b/net/wimax/op-msg.c
@@ -279,7 +279,7 @@ int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb)
279 279
280 d_printf(1, dev, "CTX: wimax msg, %zu bytes\n", size); 280 d_printf(1, dev, "CTX: wimax msg, %zu bytes\n", size);
281 d_dump(2, dev, msg, size); 281 d_dump(2, dev, msg, size);
282 genlmsg_multicast(skb, 0, wimax_gnl_mcg.id, GFP_KERNEL); 282 genlmsg_multicast(&wimax_gnl_family, skb, 0, 0, GFP_KERNEL);
283 d_printf(1, dev, "CTX: genl multicast done\n"); 283 d_printf(1, dev, "CTX: genl multicast done\n");
284 return 0; 284 return 0;
285} 285}
@@ -321,17 +321,6 @@ int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name,
321} 321}
322EXPORT_SYMBOL_GPL(wimax_msg); 322EXPORT_SYMBOL_GPL(wimax_msg);
323 323
324
325static const struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = {
326 [WIMAX_GNL_MSG_IFIDX] = {
327 .type = NLA_U32,
328 },
329 [WIMAX_GNL_MSG_DATA] = {
330 .type = NLA_UNSPEC, /* libnl doesn't grok BINARY yet */
331 },
332};
333
334
335/* 324/*
336 * Relays a message from user space to the driver 325 * Relays a message from user space to the driver
337 * 326 *
@@ -340,7 +329,6 @@ static const struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = {
340 * 329 *
341 * This call will block while handling/relaying the message. 330 * This call will block while handling/relaying the message.
342 */ 331 */
343static
344int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info) 332int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info)
345{ 333{
346 int result, ifindex; 334 int result, ifindex;
@@ -418,16 +406,3 @@ error_no_wimax_dev:
418 return result; 406 return result;
419} 407}
420 408
421
422/*
423 * Generic Netlink glue
424 */
425
426struct genl_ops wimax_gnl_msg_from_user = {
427 .cmd = WIMAX_GNL_OP_MSG_FROM_USER,
428 .flags = GENL_ADMIN_PERM,
429 .policy = wimax_gnl_msg_policy,
430 .doit = wimax_gnl_doit_msg_from_user,
431 .dumpit = NULL,
432};
433
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
index 7ceffe39d70e..eb4580784d9d 100644
--- a/net/wimax/op-reset.c
+++ b/net/wimax/op-reset.c
@@ -92,13 +92,6 @@ int wimax_reset(struct wimax_dev *wimax_dev)
92EXPORT_SYMBOL(wimax_reset); 92EXPORT_SYMBOL(wimax_reset);
93 93
94 94
95static const struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] = {
96 [WIMAX_GNL_RESET_IFIDX] = {
97 .type = NLA_U32,
98 },
99};
100
101
102/* 95/*
103 * Exporting to user space over generic netlink 96 * Exporting to user space over generic netlink
104 * 97 *
@@ -106,7 +99,6 @@ static const struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] =
106 * 99 *
107 * No attributes. 100 * No attributes.
108 */ 101 */
109static
110int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info) 102int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info)
111{ 103{
112 int result, ifindex; 104 int result, ifindex;
@@ -130,12 +122,3 @@ error_no_wimax_dev:
130 d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result); 122 d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
131 return result; 123 return result;
132} 124}
133
134
135struct genl_ops wimax_gnl_reset = {
136 .cmd = WIMAX_GNL_OP_RESET,
137 .flags = GENL_ADMIN_PERM,
138 .policy = wimax_gnl_reset_policy,
139 .doit = wimax_gnl_doit_reset,
140 .dumpit = NULL,
141};
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index 7ab60babdd22..403078d670a9 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -411,17 +411,6 @@ void wimax_rfkill_rm(struct wimax_dev *wimax_dev)
411 * just query). 411 * just query).
412 */ 412 */
413 413
414static const struct nla_policy wimax_gnl_rfkill_policy[WIMAX_GNL_ATTR_MAX + 1] = {
415 [WIMAX_GNL_RFKILL_IFIDX] = {
416 .type = NLA_U32,
417 },
418 [WIMAX_GNL_RFKILL_STATE] = {
419 .type = NLA_U32 /* enum wimax_rf_state */
420 },
421};
422
423
424static
425int wimax_gnl_doit_rfkill(struct sk_buff *skb, struct genl_info *info) 414int wimax_gnl_doit_rfkill(struct sk_buff *skb, struct genl_info *info)
426{ 415{
427 int result, ifindex; 416 int result, ifindex;
@@ -457,13 +446,3 @@ error_no_wimax_dev:
457 d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result); 446 d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
458 return result; 447 return result;
459} 448}
460
461
462struct genl_ops wimax_gnl_rfkill = {
463 .cmd = WIMAX_GNL_OP_RFKILL,
464 .flags = GENL_ADMIN_PERM,
465 .policy = wimax_gnl_rfkill_policy,
466 .doit = wimax_gnl_doit_rfkill,
467 .dumpit = NULL,
468};
469
diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c
index aff8776e2d41..995c08c827b5 100644
--- a/net/wimax/op-state-get.c
+++ b/net/wimax/op-state-get.c
@@ -33,13 +33,6 @@
33#include "debug-levels.h" 33#include "debug-levels.h"
34 34
35 35
36static const struct nla_policy wimax_gnl_state_get_policy[WIMAX_GNL_ATTR_MAX + 1] = {
37 [WIMAX_GNL_STGET_IFIDX] = {
38 .type = NLA_U32,
39 },
40};
41
42
43/* 36/*
44 * Exporting to user space over generic netlink 37 * Exporting to user space over generic netlink
45 * 38 *
@@ -48,7 +41,6 @@ static const struct nla_policy wimax_gnl_state_get_policy[WIMAX_GNL_ATTR_MAX + 1
48 * 41 *
49 * No attributes. 42 * No attributes.
50 */ 43 */
51static
52int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info) 44int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info)
53{ 45{
54 int result, ifindex; 46 int result, ifindex;
@@ -72,12 +64,3 @@ error_no_wimax_dev:
72 d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result); 64 d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
73 return result; 65 return result;
74} 66}
75
76
77struct genl_ops wimax_gnl_state_get = {
78 .cmd = WIMAX_GNL_OP_STATE_GET,
79 .flags = GENL_ADMIN_PERM,
80 .policy = wimax_gnl_state_get_policy,
81 .doit = wimax_gnl_doit_state_get,
82 .dumpit = NULL,
83};
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index a6470ac39498..ec8b577db135 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -116,8 +116,9 @@ struct sk_buff *wimax_gnl_re_state_change_alloc(
116 dev_err(dev, "RE_STCH: can't create message\n"); 116 dev_err(dev, "RE_STCH: can't create message\n");
117 goto error_new; 117 goto error_new;
118 } 118 }
119 data = genlmsg_put(report_skb, 0, wimax_gnl_mcg.id, &wimax_gnl_family, 119 /* FIXME: sending a group ID as the seq is wrong */
120 0, WIMAX_GNL_RE_STATE_CHANGE); 120 data = genlmsg_put(report_skb, 0, wimax_gnl_family.mcgrp_offset,
121 &wimax_gnl_family, 0, WIMAX_GNL_RE_STATE_CHANGE);
121 if (data == NULL) { 122 if (data == NULL) {
122 dev_err(dev, "RE_STCH: can't put data into message\n"); 123 dev_err(dev, "RE_STCH: can't put data into message\n");
123 goto error_put; 124 goto error_put;
@@ -177,7 +178,7 @@ int wimax_gnl_re_state_change_send(
177 goto out; 178 goto out;
178 } 179 }
179 genlmsg_end(report_skb, header); 180 genlmsg_end(report_skb, header);
180 genlmsg_multicast(report_skb, 0, wimax_gnl_mcg.id, GFP_KERNEL); 181 genlmsg_multicast(&wimax_gnl_family, report_skb, 0, 0, GFP_KERNEL);
181out: 182out:
182 d_fnend(3, dev, "(wimax_dev %p report_skb %p) = %d\n", 183 d_fnend(3, dev, "(wimax_dev %p report_skb %p) = %d\n",
183 wimax_dev, report_skb, result); 184 wimax_dev, report_skb, result);
@@ -402,22 +403,44 @@ void wimax_dev_init(struct wimax_dev *wimax_dev)
402} 403}
403EXPORT_SYMBOL_GPL(wimax_dev_init); 404EXPORT_SYMBOL_GPL(wimax_dev_init);
404 405
405/* 406static const struct nla_policy wimax_gnl_policy[WIMAX_GNL_ATTR_MAX + 1] = {
406 * This extern is declared here because it's easier to keep track -- 407 [WIMAX_GNL_RESET_IFIDX] = { .type = NLA_U32, },
407 * both declarations are a list of the same 408 [WIMAX_GNL_RFKILL_IFIDX] = { .type = NLA_U32, },
408 */ 409 [WIMAX_GNL_RFKILL_STATE] = {
409extern struct genl_ops 410 .type = NLA_U32 /* enum wimax_rf_state */
410 wimax_gnl_msg_from_user, 411 },
411 wimax_gnl_reset, 412 [WIMAX_GNL_STGET_IFIDX] = { .type = NLA_U32, },
412 wimax_gnl_rfkill, 413 [WIMAX_GNL_MSG_IFIDX] = { .type = NLA_U32, },
413 wimax_gnl_state_get; 414 [WIMAX_GNL_MSG_DATA] = {
415 .type = NLA_UNSPEC, /* libnl doesn't grok BINARY yet */
416 },
417};
414 418
415static 419static const struct genl_ops wimax_gnl_ops[] = {
416struct genl_ops *wimax_gnl_ops[] = { 420 {
417 &wimax_gnl_msg_from_user, 421 .cmd = WIMAX_GNL_OP_MSG_FROM_USER,
418 &wimax_gnl_reset, 422 .flags = GENL_ADMIN_PERM,
419 &wimax_gnl_rfkill, 423 .policy = wimax_gnl_policy,
420 &wimax_gnl_state_get, 424 .doit = wimax_gnl_doit_msg_from_user,
425 },
426 {
427 .cmd = WIMAX_GNL_OP_RESET,
428 .flags = GENL_ADMIN_PERM,
429 .policy = wimax_gnl_policy,
430 .doit = wimax_gnl_doit_reset,
431 },
432 {
433 .cmd = WIMAX_GNL_OP_RFKILL,
434 .flags = GENL_ADMIN_PERM,
435 .policy = wimax_gnl_policy,
436 .doit = wimax_gnl_doit_rfkill,
437 },
438 {
439 .cmd = WIMAX_GNL_OP_STATE_GET,
440 .flags = GENL_ADMIN_PERM,
441 .policy = wimax_gnl_policy,
442 .doit = wimax_gnl_doit_state_get,
443 },
421}; 444};
422 445
423 446
@@ -557,8 +580,8 @@ struct genl_family wimax_gnl_family = {
557 .maxattr = WIMAX_GNL_ATTR_MAX, 580 .maxattr = WIMAX_GNL_ATTR_MAX,
558}; 581};
559 582
560struct genl_multicast_group wimax_gnl_mcg = { 583static const struct genl_multicast_group wimax_gnl_mcgrps[] = {
561 .name = "msg", 584 { .name = "msg", },
562}; 585};
563 586
564 587
@@ -567,7 +590,7 @@ struct genl_multicast_group wimax_gnl_mcg = {
567static 590static
568int __init wimax_subsys_init(void) 591int __init wimax_subsys_init(void)
569{ 592{
570 int result, cnt; 593 int result;
571 594
572 d_fnstart(4, NULL, "()\n"); 595 d_fnstart(4, NULL, "()\n");
573 d_parse_params(D_LEVEL, D_LEVEL_SIZE, wimax_debug_params, 596 d_parse_params(D_LEVEL, D_LEVEL_SIZE, wimax_debug_params,
@@ -575,38 +598,18 @@ int __init wimax_subsys_init(void)
575 598
576 snprintf(wimax_gnl_family.name, sizeof(wimax_gnl_family.name), 599 snprintf(wimax_gnl_family.name, sizeof(wimax_gnl_family.name),
577 "WiMAX"); 600 "WiMAX");
578 result = genl_register_family(&wimax_gnl_family); 601 result = genl_register_family_with_ops_groups(&wimax_gnl_family,
602 wimax_gnl_ops,
603 wimax_gnl_mcgrps);
579 if (unlikely(result < 0)) { 604 if (unlikely(result < 0)) {
580 printk(KERN_ERR "cannot register generic netlink family: %d\n", 605 printk(KERN_ERR "cannot register generic netlink family: %d\n",
581 result); 606 result);
582 goto error_register_family; 607 goto error_register_family;
583 } 608 }
584 609
585 for (cnt = 0; cnt < ARRAY_SIZE(wimax_gnl_ops); cnt++) {
586 result = genl_register_ops(&wimax_gnl_family,
587 wimax_gnl_ops[cnt]);
588 d_printf(4, NULL, "registering generic netlink op code "
589 "%u: %d\n", wimax_gnl_ops[cnt]->cmd, result);
590 if (unlikely(result < 0)) {
591 printk(KERN_ERR "cannot register generic netlink op "
592 "code %u: %d\n",
593 wimax_gnl_ops[cnt]->cmd, result);
594 goto error_register_ops;
595 }
596 }
597
598 result = genl_register_mc_group(&wimax_gnl_family, &wimax_gnl_mcg);
599 if (result < 0)
600 goto error_mc_group;
601 d_fnend(4, NULL, "() = 0\n"); 610 d_fnend(4, NULL, "() = 0\n");
602 return 0; 611 return 0;
603 612
604error_mc_group:
605error_register_ops:
606 for (cnt--; cnt >= 0; cnt--)
607 genl_unregister_ops(&wimax_gnl_family,
608 wimax_gnl_ops[cnt]);
609 genl_unregister_family(&wimax_gnl_family);
610error_register_family: 613error_register_family:
611 d_fnend(4, NULL, "() = %d\n", result); 614 d_fnend(4, NULL, "() = %d\n", result);
612 return result; 615 return result;
@@ -619,12 +622,7 @@ module_init(wimax_subsys_init);
619static 622static
620void __exit wimax_subsys_exit(void) 623void __exit wimax_subsys_exit(void)
621{ 624{
622 int cnt;
623 wimax_id_table_release(); 625 wimax_id_table_release();
624 genl_unregister_mc_group(&wimax_gnl_family, &wimax_gnl_mcg);
625 for (cnt = ARRAY_SIZE(wimax_gnl_ops) - 1; cnt >= 0; cnt--)
626 genl_unregister_ops(&wimax_gnl_family,
627 wimax_gnl_ops[cnt]);
628 genl_unregister_family(&wimax_gnl_family); 626 genl_unregister_family(&wimax_gnl_family);
629} 627}
630module_exit(wimax_subsys_exit); 628module_exit(wimax_subsys_exit);
diff --git a/net/wimax/wimax-internal.h b/net/wimax/wimax-internal.h
index 5dcd9c067bf0..b445b82020a8 100644
--- a/net/wimax/wimax-internal.h
+++ b/net/wimax/wimax-internal.h
@@ -84,8 +84,14 @@ void wimax_id_table_release(void);
84int wimax_rfkill_add(struct wimax_dev *); 84int wimax_rfkill_add(struct wimax_dev *);
85void wimax_rfkill_rm(struct wimax_dev *); 85void wimax_rfkill_rm(struct wimax_dev *);
86 86
87/* generic netlink */
87extern struct genl_family wimax_gnl_family; 88extern struct genl_family wimax_gnl_family;
88extern struct genl_multicast_group wimax_gnl_mcg; 89
90/* ops */
91int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info);
92int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info);
93int wimax_gnl_doit_rfkill(struct sk_buff *skb, struct genl_info *info);
94int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info);
89 95
90#endif /* #ifdef __KERNEL__ */ 96#endif /* #ifdef __KERNEL__ */
91#endif /* #ifndef __WIMAX_INTERNAL_H__ */ 97#endif /* #ifndef __WIMAX_INTERNAL_H__ */
diff --git a/net/wireless/core.c b/net/wireless/core.c
index aff959e5a1b3..52b865fb7351 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -451,6 +451,15 @@ int wiphy_register(struct wiphy *wiphy)
451 int i; 451 int i;
452 u16 ifmodes = wiphy->interface_modes; 452 u16 ifmodes = wiphy->interface_modes;
453 453
454 /* support for 5/10 MHz is broken due to nl80211 API mess - disable */
455 wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_5_10_MHZ;
456
457 /*
458 * There are major locking problems in nl80211/mac80211 for CSA,
459 * disable for all drivers until this has been reworked.
460 */
461 wiphy->flags &= ~WIPHY_FLAG_HAS_CHANNEL_SWITCH;
462
454#ifdef CONFIG_PM 463#ifdef CONFIG_PM
455 if (WARN_ON(wiphy->wowlan && 464 if (WARN_ON(wiphy->wowlan &&
456 (wiphy->wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && 465 (wiphy->wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 9d797df56649..89737ee2669a 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -262,7 +262,7 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
262 262
263 /* try to find an IBSS channel if none requested ... */ 263 /* try to find an IBSS channel if none requested ... */
264 if (!wdev->wext.ibss.chandef.chan) { 264 if (!wdev->wext.ibss.chandef.chan) {
265 wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT; 265 struct ieee80211_channel *new_chan = NULL;
266 266
267 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 267 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
268 struct ieee80211_supported_band *sband; 268 struct ieee80211_supported_band *sband;
@@ -278,18 +278,19 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
278 continue; 278 continue;
279 if (chan->flags & IEEE80211_CHAN_DISABLED) 279 if (chan->flags & IEEE80211_CHAN_DISABLED)
280 continue; 280 continue;
281 wdev->wext.ibss.chandef.chan = chan; 281 new_chan = chan;
282 wdev->wext.ibss.chandef.center_freq1 =
283 chan->center_freq;
284 break; 282 break;
285 } 283 }
286 284
287 if (wdev->wext.ibss.chandef.chan) 285 if (new_chan)
288 break; 286 break;
289 } 287 }
290 288
291 if (!wdev->wext.ibss.chandef.chan) 289 if (!new_chan)
292 return -EINVAL; 290 return -EINVAL;
291
292 cfg80211_chandef_create(&wdev->wext.ibss.chandef, new_chan,
293 NL80211_CHAN_NO_HT);
293 } 294 }
294 295
295 /* don't join -- SSID is not there */ 296 /* don't join -- SSID is not there */
@@ -363,9 +364,8 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
363 return err; 364 return err;
364 365
365 if (chan) { 366 if (chan) {
366 wdev->wext.ibss.chandef.chan = chan; 367 cfg80211_chandef_create(&wdev->wext.ibss.chandef, chan,
367 wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT; 368 NL80211_CHAN_NO_HT);
368 wdev->wext.ibss.chandef.center_freq1 = freq;
369 wdev->wext.ibss.channel_fixed = true; 369 wdev->wext.ibss.channel_fixed = true;
370 } else { 370 } else {
371 /* cfg80211_ibss_wext_join will pick one if needed */ 371 /* cfg80211_ibss_wext_join will pick one if needed */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a7f4e7902104..138dc3bb8b67 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -30,9 +30,9 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
30 struct cfg80211_crypto_settings *settings, 30 struct cfg80211_crypto_settings *settings,
31 int cipher_limit); 31 int cipher_limit);
32 32
33static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb, 33static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
34 struct genl_info *info); 34 struct genl_info *info);
35static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb, 35static void nl80211_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
36 struct genl_info *info); 36 struct genl_info *info);
37 37
38/* the netlink family */ 38/* the netlink family */
@@ -47,6 +47,25 @@ static struct genl_family nl80211_fam = {
47 .post_doit = nl80211_post_doit, 47 .post_doit = nl80211_post_doit,
48}; 48};
49 49
50/* multicast groups */
51enum nl80211_multicast_groups {
52 NL80211_MCGRP_CONFIG,
53 NL80211_MCGRP_SCAN,
54 NL80211_MCGRP_REGULATORY,
55 NL80211_MCGRP_MLME,
56 NL80211_MCGRP_TESTMODE /* keep last - ifdef! */
57};
58
59static const struct genl_multicast_group nl80211_mcgrps[] = {
60 [NL80211_MCGRP_CONFIG] = { .name = "config", },
61 [NL80211_MCGRP_SCAN] = { .name = "scan", },
62 [NL80211_MCGRP_REGULATORY] = { .name = "regulatory", },
63 [NL80211_MCGRP_MLME] = { .name = "mlme", },
64#ifdef CONFIG_NL80211_TESTMODE
65 [NL80211_MCGRP_TESTMODE] = { .name = "testmode", }
66#endif
67};
68
50/* returns ERR_PTR values */ 69/* returns ERR_PTR values */
51static struct wireless_dev * 70static struct wireless_dev *
52__cfg80211_wdev_from_attrs(struct net *netns, struct nlattr **attrs) 71__cfg80211_wdev_from_attrs(struct net *netns, struct nlattr **attrs)
@@ -2668,7 +2687,7 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
2668 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 2687 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
2669 NL80211_CMD_NEW_KEY); 2688 NL80211_CMD_NEW_KEY);
2670 if (!hdr) 2689 if (!hdr)
2671 return -ENOBUFS; 2690 goto nla_put_failure;
2672 2691
2673 cookie.msg = msg; 2692 cookie.msg = msg;
2674 cookie.idx = key_idx; 2693 cookie.idx = key_idx;
@@ -5330,6 +5349,10 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
5330 err = -EINVAL; 5349 err = -EINVAL;
5331 goto out_free; 5350 goto out_free;
5332 } 5351 }
5352
5353 if (!wiphy->bands[band])
5354 continue;
5355
5333 err = ieee80211_get_ratemask(wiphy->bands[band], 5356 err = ieee80211_get_ratemask(wiphy->bands[band],
5334 nla_data(attr), 5357 nla_data(attr),
5335 nla_len(attr), 5358 nla_len(attr),
@@ -6656,10 +6679,6 @@ static int nl80211_set_mcast_rate(struct sk_buff *skb, struct genl_info *info)
6656 6679
6657 6680
6658#ifdef CONFIG_NL80211_TESTMODE 6681#ifdef CONFIG_NL80211_TESTMODE
6659static struct genl_multicast_group nl80211_testmode_mcgrp = {
6660 .name = "testmode",
6661};
6662
6663static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info) 6682static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info)
6664{ 6683{
6665 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 6684 struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -6868,8 +6887,8 @@ void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
6868 6887
6869 nla_nest_end(skb, data); 6888 nla_nest_end(skb, data);
6870 genlmsg_end(skb, hdr); 6889 genlmsg_end(skb, hdr);
6871 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0, 6890 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), skb, 0,
6872 nl80211_testmode_mcgrp.id, gfp); 6891 NL80211_MCGRP_TESTMODE, gfp);
6873} 6892}
6874EXPORT_SYMBOL(cfg80211_testmode_event); 6893EXPORT_SYMBOL(cfg80211_testmode_event);
6875#endif 6894#endif
@@ -8851,7 +8870,7 @@ static int nl80211_crit_protocol_stop(struct sk_buff *skb,
8851#define NL80211_FLAG_NEED_WDEV_UP (NL80211_FLAG_NEED_WDEV |\ 8870#define NL80211_FLAG_NEED_WDEV_UP (NL80211_FLAG_NEED_WDEV |\
8852 NL80211_FLAG_CHECK_NETDEV_UP) 8871 NL80211_FLAG_CHECK_NETDEV_UP)
8853 8872
8854static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb, 8873static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
8855 struct genl_info *info) 8874 struct genl_info *info)
8856{ 8875{
8857 struct cfg80211_registered_device *rdev; 8876 struct cfg80211_registered_device *rdev;
@@ -8920,7 +8939,7 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
8920 return 0; 8939 return 0;
8921} 8940}
8922 8941
8923static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb, 8942static void nl80211_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
8924 struct genl_info *info) 8943 struct genl_info *info)
8925{ 8944{
8926 if (info->user_ptr[1]) { 8945 if (info->user_ptr[1]) {
@@ -8937,7 +8956,7 @@ static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb,
8937 rtnl_unlock(); 8956 rtnl_unlock();
8938} 8957}
8939 8958
8940static struct genl_ops nl80211_ops[] = { 8959static const struct genl_ops nl80211_ops[] = {
8941 { 8960 {
8942 .cmd = NL80211_CMD_GET_WIPHY, 8961 .cmd = NL80211_CMD_GET_WIPHY,
8943 .doit = nl80211_get_wiphy, 8962 .doit = nl80211_get_wiphy,
@@ -9566,21 +9585,6 @@ static struct genl_ops nl80211_ops[] = {
9566 }, 9585 },
9567}; 9586};
9568 9587
9569static struct genl_multicast_group nl80211_mlme_mcgrp = {
9570 .name = "mlme",
9571};
9572
9573/* multicast groups */
9574static struct genl_multicast_group nl80211_config_mcgrp = {
9575 .name = "config",
9576};
9577static struct genl_multicast_group nl80211_scan_mcgrp = {
9578 .name = "scan",
9579};
9580static struct genl_multicast_group nl80211_regulatory_mcgrp = {
9581 .name = "regulatory",
9582};
9583
9584/* notification functions */ 9588/* notification functions */
9585 9589
9586void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev) 9590void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev)
@@ -9597,8 +9601,8 @@ void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev)
9597 return; 9601 return;
9598 } 9602 }
9599 9603
9600 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 9604 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
9601 nl80211_config_mcgrp.id, GFP_KERNEL); 9605 NL80211_MCGRP_CONFIG, GFP_KERNEL);
9602} 9606}
9603 9607
9604static int nl80211_add_scan_req(struct sk_buff *msg, 9608static int nl80211_add_scan_req(struct sk_buff *msg,
@@ -9633,8 +9637,9 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
9633 nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie)) 9637 nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie))
9634 goto nla_put_failure; 9638 goto nla_put_failure;
9635 9639
9636 if (req->flags) 9640 if (req->flags &&
9637 nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags); 9641 nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags))
9642 goto nla_put_failure;
9638 9643
9639 return 0; 9644 return 0;
9640 nla_put_failure: 9645 nla_put_failure:
@@ -9707,8 +9712,8 @@ void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
9707 return; 9712 return;
9708 } 9713 }
9709 9714
9710 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 9715 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
9711 nl80211_scan_mcgrp.id, GFP_KERNEL); 9716 NL80211_MCGRP_SCAN, GFP_KERNEL);
9712} 9717}
9713 9718
9714void nl80211_send_scan_done(struct cfg80211_registered_device *rdev, 9719void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
@@ -9726,8 +9731,8 @@ void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
9726 return; 9731 return;
9727 } 9732 }
9728 9733
9729 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 9734 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
9730 nl80211_scan_mcgrp.id, GFP_KERNEL); 9735 NL80211_MCGRP_SCAN, GFP_KERNEL);
9731} 9736}
9732 9737
9733void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, 9738void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
@@ -9745,8 +9750,8 @@ void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
9745 return; 9750 return;
9746 } 9751 }
9747 9752
9748 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 9753 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
9749 nl80211_scan_mcgrp.id, GFP_KERNEL); 9754 NL80211_MCGRP_SCAN, GFP_KERNEL);
9750} 9755}
9751 9756
9752void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev, 9757void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
@@ -9764,8 +9769,8 @@ void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
9764 return; 9769 return;
9765 } 9770 }
9766 9771
9767 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 9772 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
9768 nl80211_scan_mcgrp.id, GFP_KERNEL); 9773 NL80211_MCGRP_SCAN, GFP_KERNEL);
9769} 9774}
9770 9775
9771void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev, 9776void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
@@ -9782,8 +9787,8 @@ void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
9782 return; 9787 return;
9783 } 9788 }
9784 9789
9785 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 9790 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
9786 nl80211_scan_mcgrp.id, GFP_KERNEL); 9791 NL80211_MCGRP_SCAN, GFP_KERNEL);
9787} 9792}
9788 9793
9789/* 9794/*
@@ -9837,8 +9842,8 @@ void nl80211_send_reg_change_event(struct regulatory_request *request)
9837 genlmsg_end(msg, hdr); 9842 genlmsg_end(msg, hdr);
9838 9843
9839 rcu_read_lock(); 9844 rcu_read_lock();
9840 genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id, 9845 genlmsg_multicast_allns(&nl80211_fam, msg, 0,
9841 GFP_ATOMIC); 9846 NL80211_MCGRP_REGULATORY, GFP_ATOMIC);
9842 rcu_read_unlock(); 9847 rcu_read_unlock();
9843 9848
9844 return; 9849 return;
@@ -9873,8 +9878,8 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
9873 9878
9874 genlmsg_end(msg, hdr); 9879 genlmsg_end(msg, hdr);
9875 9880
9876 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 9881 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
9877 nl80211_mlme_mcgrp.id, gfp); 9882 NL80211_MCGRP_MLME, gfp);
9878 return; 9883 return;
9879 9884
9880 nla_put_failure: 9885 nla_put_failure:
@@ -9961,8 +9966,8 @@ static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev,
9961 9966
9962 genlmsg_end(msg, hdr); 9967 genlmsg_end(msg, hdr);
9963 9968
9964 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 9969 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
9965 nl80211_mlme_mcgrp.id, gfp); 9970 NL80211_MCGRP_MLME, gfp);
9966 return; 9971 return;
9967 9972
9968 nla_put_failure: 9973 nla_put_failure:
@@ -10017,8 +10022,8 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
10017 10022
10018 genlmsg_end(msg, hdr); 10023 genlmsg_end(msg, hdr);
10019 10024
10020 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 10025 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
10021 nl80211_mlme_mcgrp.id, gfp); 10026 NL80211_MCGRP_MLME, gfp);
10022 return; 10027 return;
10023 10028
10024 nla_put_failure: 10029 nla_put_failure:
@@ -10056,8 +10061,8 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
10056 10061
10057 genlmsg_end(msg, hdr); 10062 genlmsg_end(msg, hdr);
10058 10063
10059 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 10064 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
10060 nl80211_mlme_mcgrp.id, gfp); 10065 NL80211_MCGRP_MLME, gfp);
10061 return; 10066 return;
10062 10067
10063 nla_put_failure: 10068 nla_put_failure:
@@ -10094,8 +10099,8 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
10094 10099
10095 genlmsg_end(msg, hdr); 10100 genlmsg_end(msg, hdr);
10096 10101
10097 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 10102 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
10098 nl80211_mlme_mcgrp.id, GFP_KERNEL); 10103 NL80211_MCGRP_MLME, GFP_KERNEL);
10099 return; 10104 return;
10100 10105
10101 nla_put_failure: 10106 nla_put_failure:
@@ -10128,8 +10133,8 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
10128 10133
10129 genlmsg_end(msg, hdr); 10134 genlmsg_end(msg, hdr);
10130 10135
10131 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 10136 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
10132 nl80211_mlme_mcgrp.id, gfp); 10137 NL80211_MCGRP_MLME, gfp);
10133 return; 10138 return;
10134 10139
10135 nla_put_failure: 10140 nla_put_failure:
@@ -10169,8 +10174,8 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *addr,
10169 10174
10170 genlmsg_end(msg, hdr); 10175 genlmsg_end(msg, hdr);
10171 10176
10172 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 10177 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
10173 nl80211_mlme_mcgrp.id, gfp); 10178 NL80211_MCGRP_MLME, gfp);
10174 return; 10179 return;
10175 10180
10176 nla_put_failure: 10181 nla_put_failure:
@@ -10208,8 +10213,8 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
10208 10213
10209 genlmsg_end(msg, hdr); 10214 genlmsg_end(msg, hdr);
10210 10215
10211 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 10216 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
10212 nl80211_mlme_mcgrp.id, gfp); 10217 NL80211_MCGRP_MLME, gfp);
10213 return; 10218 return;
10214 10219
10215 nla_put_failure: 10220 nla_put_failure:
@@ -10261,8 +10266,8 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
10261 genlmsg_end(msg, hdr); 10266 genlmsg_end(msg, hdr);
10262 10267
10263 rcu_read_lock(); 10268 rcu_read_lock();
10264 genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id, 10269 genlmsg_multicast_allns(&nl80211_fam, msg, 0,
10265 GFP_ATOMIC); 10270 NL80211_MCGRP_REGULATORY, GFP_ATOMIC);
10266 rcu_read_unlock(); 10271 rcu_read_unlock();
10267 10272
10268 return; 10273 return;
@@ -10307,8 +10312,8 @@ static void nl80211_send_remain_on_chan_event(
10307 10312
10308 genlmsg_end(msg, hdr); 10313 genlmsg_end(msg, hdr);
10309 10314
10310 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 10315 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
10311 nl80211_mlme_mcgrp.id, gfp); 10316 NL80211_MCGRP_MLME, gfp);
10312 return; 10317 return;
10313 10318
10314 nla_put_failure: 10319 nla_put_failure:
@@ -10362,8 +10367,8 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
10362 return; 10367 return;
10363 } 10368 }
10364 10369
10365 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 10370 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
10366 nl80211_mlme_mcgrp.id, gfp); 10371 NL80211_MCGRP_MLME, gfp);
10367} 10372}
10368EXPORT_SYMBOL(cfg80211_new_sta); 10373EXPORT_SYMBOL(cfg80211_new_sta);
10369 10374
@@ -10392,8 +10397,8 @@ void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp)
10392 10397
10393 genlmsg_end(msg, hdr); 10398 genlmsg_end(msg, hdr);
10394 10399
10395 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 10400 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
10396 nl80211_mlme_mcgrp.id, gfp); 10401 NL80211_MCGRP_MLME, gfp);
10397 return; 10402 return;
10398 10403
10399 nla_put_failure: 10404 nla_put_failure:
@@ -10428,8 +10433,8 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
10428 10433
10429 genlmsg_end(msg, hdr); 10434 genlmsg_end(msg, hdr);
10430 10435
10431 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 10436 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
10432 nl80211_mlme_mcgrp.id, gfp); 10437 NL80211_MCGRP_MLME, gfp);
10433 return; 10438 return;
10434 10439
10435 nla_put_failure: 10440 nla_put_failure:
@@ -10590,8 +10595,8 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
10590 10595
10591 genlmsg_end(msg, hdr); 10596 genlmsg_end(msg, hdr);
10592 10597
10593 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 10598 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
10594 nl80211_mlme_mcgrp.id, gfp); 10599 NL80211_MCGRP_MLME, gfp);
10595 return; 10600 return;
10596 10601
10597 nla_put_failure: 10602 nla_put_failure:
@@ -10639,8 +10644,8 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
10639 10644
10640 genlmsg_end(msg, hdr); 10645 genlmsg_end(msg, hdr);
10641 10646
10642 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 10647 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
10643 nl80211_mlme_mcgrp.id, gfp); 10648 NL80211_MCGRP_MLME, gfp);
10644 return; 10649 return;
10645 10650
10646 nla_put_failure: 10651 nla_put_failure:
@@ -10684,8 +10689,8 @@ static void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
10684 10689
10685 genlmsg_end(msg, hdr); 10690 genlmsg_end(msg, hdr);
10686 10691
10687 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 10692 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
10688 nl80211_mlme_mcgrp.id, gfp); 10693 NL80211_MCGRP_MLME, gfp);
10689 return; 10694 return;
10690 10695
10691 nla_put_failure: 10696 nla_put_failure:
@@ -10742,8 +10747,8 @@ nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
10742 10747
10743 genlmsg_end(msg, hdr); 10748 genlmsg_end(msg, hdr);
10744 10749
10745 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 10750 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
10746 nl80211_mlme_mcgrp.id, gfp); 10751 NL80211_MCGRP_MLME, gfp);
10747 return; 10752 return;
10748 10753
10749 nla_put_failure: 10754 nla_put_failure:
@@ -10789,8 +10794,8 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
10789 10794
10790 genlmsg_end(msg, hdr); 10795 genlmsg_end(msg, hdr);
10791 10796
10792 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 10797 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
10793 nl80211_mlme_mcgrp.id, gfp); 10798 NL80211_MCGRP_MLME, gfp);
10794 return; 10799 return;
10795 10800
10796 nla_put_failure: 10801 nla_put_failure:
@@ -10866,8 +10871,8 @@ void cfg80211_cqm_txe_notify(struct net_device *dev,
10866 10871
10867 genlmsg_end(msg, hdr); 10872 genlmsg_end(msg, hdr);
10868 10873
10869 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 10874 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
10870 nl80211_mlme_mcgrp.id, gfp); 10875 NL80211_MCGRP_MLME, gfp);
10871 return; 10876 return;
10872 10877
10873 nla_put_failure: 10878 nla_put_failure:
@@ -10915,8 +10920,8 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev,
10915 10920
10916 genlmsg_end(msg, hdr); 10921 genlmsg_end(msg, hdr);
10917 10922
10918 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 10923 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
10919 nl80211_mlme_mcgrp.id, gfp); 10924 NL80211_MCGRP_MLME, gfp);
10920 return; 10925 return;
10921 10926
10922 nla_put_failure: 10927 nla_put_failure:
@@ -10962,8 +10967,8 @@ void cfg80211_cqm_pktloss_notify(struct net_device *dev,
10962 10967
10963 genlmsg_end(msg, hdr); 10968 genlmsg_end(msg, hdr);
10964 10969
10965 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 10970 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
10966 nl80211_mlme_mcgrp.id, gfp); 10971 NL80211_MCGRP_MLME, gfp);
10967 return; 10972 return;
10968 10973
10969 nla_put_failure: 10974 nla_put_failure:
@@ -11002,8 +11007,8 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
11002 11007
11003 genlmsg_end(msg, hdr); 11008 genlmsg_end(msg, hdr);
11004 11009
11005 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 11010 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
11006 nl80211_mlme_mcgrp.id, gfp); 11011 NL80211_MCGRP_MLME, gfp);
11007 return; 11012 return;
11008 11013
11009 nla_put_failure: 11014 nla_put_failure:
@@ -11093,6 +11098,8 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
11093 struct nlattr *reasons; 11098 struct nlattr *reasons;
11094 11099
11095 reasons = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS); 11100 reasons = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS);
11101 if (!reasons)
11102 goto free_msg;
11096 11103
11097 if (wakeup->disconnect && 11104 if (wakeup->disconnect &&
11098 nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) 11105 nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT))
@@ -11118,16 +11125,18 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
11118 wakeup->pattern_idx)) 11125 wakeup->pattern_idx))
11119 goto free_msg; 11126 goto free_msg;
11120 11127
11121 if (wakeup->tcp_match) 11128 if (wakeup->tcp_match &&
11122 nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH); 11129 nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH))
11130 goto free_msg;
11123 11131
11124 if (wakeup->tcp_connlost) 11132 if (wakeup->tcp_connlost &&
11125 nla_put_flag(msg, 11133 nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST))
11126 NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST); 11134 goto free_msg;
11127 11135
11128 if (wakeup->tcp_nomoretokens) 11136 if (wakeup->tcp_nomoretokens &&
11129 nla_put_flag(msg, 11137 nla_put_flag(msg,
11130 NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS); 11138 NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS))
11139 goto free_msg;
11131 11140
11132 if (wakeup->packet) { 11141 if (wakeup->packet) {
11133 u32 pkt_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211; 11142 u32 pkt_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211;
@@ -11154,8 +11163,8 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
11154 11163
11155 genlmsg_end(msg, hdr); 11164 genlmsg_end(msg, hdr);
11156 11165
11157 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 11166 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
11158 nl80211_mlme_mcgrp.id, gfp); 11167 NL80211_MCGRP_MLME, gfp);
11159 return; 11168 return;
11160 11169
11161 free_msg: 11170 free_msg:
@@ -11196,8 +11205,8 @@ void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer,
11196 11205
11197 genlmsg_end(msg, hdr); 11206 genlmsg_end(msg, hdr);
11198 11207
11199 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 11208 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
11200 nl80211_mlme_mcgrp.id, gfp); 11209 NL80211_MCGRP_MLME, gfp);
11201 return; 11210 return;
11202 11211
11203 nla_put_failure: 11212 nla_put_failure:
@@ -11263,24 +11272,29 @@ void cfg80211_ft_event(struct net_device *netdev,
11263 return; 11272 return;
11264 11273
11265 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FT_EVENT); 11274 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FT_EVENT);
11266 if (!hdr) { 11275 if (!hdr)
11267 nlmsg_free(msg); 11276 goto out;
11268 return;
11269 }
11270 11277
11271 nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 11278 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
11272 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 11279 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
11273 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, ft_event->target_ap); 11280 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, ft_event->target_ap))
11274 if (ft_event->ies) 11281 goto out;
11275 nla_put(msg, NL80211_ATTR_IE, ft_event->ies_len, ft_event->ies); 11282
11276 if (ft_event->ric_ies) 11283 if (ft_event->ies &&
11277 nla_put(msg, NL80211_ATTR_IE_RIC, ft_event->ric_ies_len, 11284 nla_put(msg, NL80211_ATTR_IE, ft_event->ies_len, ft_event->ies))
11278 ft_event->ric_ies); 11285 goto out;
11286 if (ft_event->ric_ies &&
11287 nla_put(msg, NL80211_ATTR_IE_RIC, ft_event->ric_ies_len,
11288 ft_event->ric_ies))
11289 goto out;
11279 11290
11280 genlmsg_end(msg, hdr); 11291 genlmsg_end(msg, hdr);
11281 11292
11282 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 11293 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
11283 nl80211_mlme_mcgrp.id, GFP_KERNEL); 11294 NL80211_MCGRP_MLME, GFP_KERNEL);
11295 return;
11296 out:
11297 nlmsg_free(msg);
11284} 11298}
11285EXPORT_SYMBOL(cfg80211_ft_event); 11299EXPORT_SYMBOL(cfg80211_ft_event);
11286 11300
@@ -11329,33 +11343,11 @@ int nl80211_init(void)
11329{ 11343{
11330 int err; 11344 int err;
11331 11345
11332 err = genl_register_family_with_ops(&nl80211_fam, 11346 err = genl_register_family_with_ops_groups(&nl80211_fam, nl80211_ops,
11333 nl80211_ops, ARRAY_SIZE(nl80211_ops)); 11347 nl80211_mcgrps);
11334 if (err) 11348 if (err)
11335 return err; 11349 return err;
11336 11350
11337 err = genl_register_mc_group(&nl80211_fam, &nl80211_config_mcgrp);
11338 if (err)
11339 goto err_out;
11340
11341 err = genl_register_mc_group(&nl80211_fam, &nl80211_scan_mcgrp);
11342 if (err)
11343 goto err_out;
11344
11345 err = genl_register_mc_group(&nl80211_fam, &nl80211_regulatory_mcgrp);
11346 if (err)
11347 goto err_out;
11348
11349 err = genl_register_mc_group(&nl80211_fam, &nl80211_mlme_mcgrp);
11350 if (err)
11351 goto err_out;
11352
11353#ifdef CONFIG_NL80211_TESTMODE
11354 err = genl_register_mc_group(&nl80211_fam, &nl80211_testmode_mcgrp);
11355 if (err)
11356 goto err_out;
11357#endif
11358
11359 err = netlink_register_notifier(&nl80211_netlink_notifier); 11351 err = netlink_register_notifier(&nl80211_netlink_notifier);
11360 if (err) 11352 if (err)
11361 goto err_out; 11353 goto err_out;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 45a3ab5612c1..7622789d3750 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1340,10 +1340,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
1340 if (sx25) { 1340 if (sx25) {
1341 sx25->sx25_family = AF_X25; 1341 sx25->sx25_family = AF_X25;
1342 sx25->sx25_addr = x25->dest_addr; 1342 sx25->sx25_addr = x25->dest_addr;
1343 msg->msg_namelen = sizeof(*sx25);
1343 } 1344 }
1344 1345
1345 msg->msg_namelen = sizeof(struct sockaddr_x25);
1346
1347 x25_check_rbuf(sk); 1346 x25_check_rbuf(sk);
1348 rc = copied; 1347 rc = copied;
1349out_free_dgram: 1348out_free_dgram:
diff --git a/scripts/asn1_compiler.c b/scripts/asn1_compiler.c
index db0e5cd34c70..91c4117637ae 100644
--- a/scripts/asn1_compiler.c
+++ b/scripts/asn1_compiler.c
@@ -1353,6 +1353,8 @@ static void render_out_of_line_list(FILE *out)
1353 render_opcode(out, "ASN1_OP_END_SET_OF%s,\n", act); 1353 render_opcode(out, "ASN1_OP_END_SET_OF%s,\n", act);
1354 render_opcode(out, "_jump_target(%u),\n", entry); 1354 render_opcode(out, "_jump_target(%u),\n", entry);
1355 break; 1355 break;
1356 default:
1357 break;
1356 } 1358 }
1357 if (e->action) 1359 if (e->action)
1358 render_opcode(out, "_action(ACT_%s),\n", 1360 render_opcode(out, "_action(ACT_%s),\n",
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 61090e0ff613..9c9810030377 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3289,6 +3289,7 @@ sub process {
3289 } 3289 }
3290 } 3290 }
3291 if (!defined $suppress_whiletrailers{$linenr} && 3291 if (!defined $suppress_whiletrailers{$linenr} &&
3292 defined($stat) && defined($cond) &&
3292 $line =~ /\b(?:if|while|for)\s*\(/ && $line !~ /^.\s*#/) { 3293 $line =~ /\b(?:if|while|for)\s*\(/ && $line !~ /^.\s*#/) {
3293 my ($s, $c) = ($stat, $cond); 3294 my ($s, $c) = ($stat, $cond);
3294 3295
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index d0da66396f62..91280b82da08 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -364,7 +364,8 @@ if ($arch eq "x86_64") {
364} elsif ($arch eq "blackfin") { 364} elsif ($arch eq "blackfin") {
365 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s__mcount\$"; 365 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s__mcount\$";
366 $mcount_adjust = -4; 366 $mcount_adjust = -4;
367} elsif ($arch eq "tilegx") { 367} elsif ($arch eq "tilegx" || $arch eq "tile") {
368 # Default to the newer TILE-Gx architecture if only "tile" is given.
368 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s__mcount\$"; 369 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s__mcount\$";
369 $type = ".quad"; 370 $type = ".quad";
370 $alignment = 8; 371 $alignment = 8;
diff --git a/scripts/sortextable.c b/scripts/sortextable.c
index 5f7a8b663cb9..7941fbdfb050 100644
--- a/scripts/sortextable.c
+++ b/scripts/sortextable.c
@@ -31,6 +31,10 @@
31#include <tools/be_byteshift.h> 31#include <tools/be_byteshift.h>
32#include <tools/le_byteshift.h> 32#include <tools/le_byteshift.h>
33 33
34#ifndef EM_ARCOMPACT
35#define EM_ARCOMPACT 93
36#endif
37
34#ifndef EM_AARCH64 38#ifndef EM_AARCH64
35#define EM_AARCH64 183 39#define EM_AARCH64 183
36#endif 40#endif
@@ -268,6 +272,7 @@ do_file(char const *const fname)
268 case EM_S390: 272 case EM_S390:
269 custom_sort = sort_relative_table; 273 custom_sort = sort_relative_table;
270 break; 274 break;
275 case EM_ARCOMPACT:
271 case EM_ARM: 276 case EM_ARM:
272 case EM_AARCH64: 277 case EM_AARCH64:
273 case EM_MIPS: 278 case EM_MIPS:
diff --git a/security/Makefile b/security/Makefile
index c26c81e92571..a5918e01a4f7 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_MMU) += min_addr.o
16# Object file lists 16# Object file lists
17obj-$(CONFIG_SECURITY) += security.o capability.o 17obj-$(CONFIG_SECURITY) += security.o capability.o
18obj-$(CONFIG_SECURITYFS) += inode.o 18obj-$(CONFIG_SECURITYFS) += inode.o
19# Must precede capability.o in order to stack properly.
20obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o 19obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o
21obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o 20obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o
22obj-$(CONFIG_AUDIT) += lsm_audit.o 21obj-$(CONFIG_AUDIT) += lsm_audit.o
diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
index 031d2d9dd695..89c78658031f 100644
--- a/security/apparmor/audit.c
+++ b/security/apparmor/audit.c
@@ -111,7 +111,6 @@ static const char *const aa_audit_type[] = {
111static void audit_pre(struct audit_buffer *ab, void *ca) 111static void audit_pre(struct audit_buffer *ab, void *ca)
112{ 112{
113 struct common_audit_data *sa = ca; 113 struct common_audit_data *sa = ca;
114 struct task_struct *tsk = sa->aad->tsk ? sa->aad->tsk : current;
115 114
116 if (aa_g_audit_header) { 115 if (aa_g_audit_header) {
117 audit_log_format(ab, "apparmor="); 116 audit_log_format(ab, "apparmor=");
@@ -132,11 +131,6 @@ static void audit_pre(struct audit_buffer *ab, void *ca)
132 131
133 if (sa->aad->profile) { 132 if (sa->aad->profile) {
134 struct aa_profile *profile = sa->aad->profile; 133 struct aa_profile *profile = sa->aad->profile;
135 pid_t pid;
136 rcu_read_lock();
137 pid = rcu_dereference(tsk->real_parent)->pid;
138 rcu_read_unlock();
139 audit_log_format(ab, " parent=%d", pid);
140 if (profile->ns != root_ns) { 134 if (profile->ns != root_ns) {
141 audit_log_format(ab, " namespace="); 135 audit_log_format(ab, " namespace=");
142 audit_log_untrustedstring(ab, profile->ns->base.hname); 136 audit_log_untrustedstring(ab, profile->ns->base.hname);
@@ -149,12 +143,6 @@ static void audit_pre(struct audit_buffer *ab, void *ca)
149 audit_log_format(ab, " name="); 143 audit_log_format(ab, " name=");
150 audit_log_untrustedstring(ab, sa->aad->name); 144 audit_log_untrustedstring(ab, sa->aad->name);
151 } 145 }
152
153 if (sa->aad->tsk) {
154 audit_log_format(ab, " pid=%d comm=", tsk->pid);
155 audit_log_untrustedstring(ab, tsk->comm);
156 }
157
158} 146}
159 147
160/** 148/**
@@ -212,7 +200,7 @@ int aa_audit(int type, struct aa_profile *profile, gfp_t gfp,
212 200
213 if (sa->aad->type == AUDIT_APPARMOR_KILL) 201 if (sa->aad->type == AUDIT_APPARMOR_KILL)
214 (void)send_sig_info(SIGKILL, NULL, 202 (void)send_sig_info(SIGKILL, NULL,
215 sa->aad->tsk ? sa->aad->tsk : current); 203 sa->u.tsk ? sa->u.tsk : current);
216 204
217 if (sa->aad->type == AUDIT_APPARMOR_ALLOWED) 205 if (sa->aad->type == AUDIT_APPARMOR_ALLOWED)
218 return complain_error(sa->aad->error); 206 return complain_error(sa->aad->error);
diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c
index 84d1f5f53877..1101c6f64bb7 100644
--- a/security/apparmor/capability.c
+++ b/security/apparmor/capability.c
@@ -53,8 +53,7 @@ static void audit_cb(struct audit_buffer *ab, void *va)
53 53
54/** 54/**
55 * audit_caps - audit a capability 55 * audit_caps - audit a capability
56 * @profile: profile confining task (NOT NULL) 56 * @profile: profile being tested for confinement (NOT NULL)
57 * @task: task capability test was performed against (NOT NULL)
58 * @cap: capability tested 57 * @cap: capability tested
59 * @error: error code returned by test 58 * @error: error code returned by test
60 * 59 *
@@ -63,8 +62,7 @@ static void audit_cb(struct audit_buffer *ab, void *va)
63 * 62 *
64 * Returns: 0 or sa->error on success, error code on failure 63 * Returns: 0 or sa->error on success, error code on failure
65 */ 64 */
66static int audit_caps(struct aa_profile *profile, struct task_struct *task, 65static int audit_caps(struct aa_profile *profile, int cap, int error)
67 int cap, int error)
68{ 66{
69 struct audit_cache *ent; 67 struct audit_cache *ent;
70 int type = AUDIT_APPARMOR_AUTO; 68 int type = AUDIT_APPARMOR_AUTO;
@@ -73,7 +71,6 @@ static int audit_caps(struct aa_profile *profile, struct task_struct *task,
73 sa.type = LSM_AUDIT_DATA_CAP; 71 sa.type = LSM_AUDIT_DATA_CAP;
74 sa.aad = &aad; 72 sa.aad = &aad;
75 sa.u.cap = cap; 73 sa.u.cap = cap;
76 sa.aad->tsk = task;
77 sa.aad->op = OP_CAPABLE; 74 sa.aad->op = OP_CAPABLE;
78 sa.aad->error = error; 75 sa.aad->error = error;
79 76
@@ -124,8 +121,7 @@ static int profile_capable(struct aa_profile *profile, int cap)
124 121
125/** 122/**
126 * aa_capable - test permission to use capability 123 * aa_capable - test permission to use capability
127 * @task: task doing capability test against (NOT NULL) 124 * @profile: profile being tested against (NOT NULL)
128 * @profile: profile confining @task (NOT NULL)
129 * @cap: capability to be tested 125 * @cap: capability to be tested
130 * @audit: whether an audit record should be generated 126 * @audit: whether an audit record should be generated
131 * 127 *
@@ -133,8 +129,7 @@ static int profile_capable(struct aa_profile *profile, int cap)
133 * 129 *
134 * Returns: 0 on success, or else an error code. 130 * Returns: 0 on success, or else an error code.
135 */ 131 */
136int aa_capable(struct task_struct *task, struct aa_profile *profile, int cap, 132int aa_capable(struct aa_profile *profile, int cap, int audit)
137 int audit)
138{ 133{
139 int error = profile_capable(profile, cap); 134 int error = profile_capable(profile, cap);
140 135
@@ -144,5 +139,5 @@ int aa_capable(struct task_struct *task, struct aa_profile *profile, int cap,
144 return error; 139 return error;
145 } 140 }
146 141
147 return audit_caps(profile, task, cap, error); 142 return audit_caps(profile, cap, error);
148} 143}
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 26c607c971f5..452567d3a08e 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -50,23 +50,21 @@ void aa_free_domain_entries(struct aa_domain *domain)
50 50
51/** 51/**
52 * may_change_ptraced_domain - check if can change profile on ptraced task 52 * may_change_ptraced_domain - check if can change profile on ptraced task
53 * @task: task we want to change profile of (NOT NULL)
54 * @to_profile: profile to change to (NOT NULL) 53 * @to_profile: profile to change to (NOT NULL)
55 * 54 *
56 * Check if the task is ptraced and if so if the tracing task is allowed 55 * Check if current is ptraced and if so if the tracing task is allowed
57 * to trace the new domain 56 * to trace the new domain
58 * 57 *
59 * Returns: %0 or error if change not allowed 58 * Returns: %0 or error if change not allowed
60 */ 59 */
61static int may_change_ptraced_domain(struct task_struct *task, 60static int may_change_ptraced_domain(struct aa_profile *to_profile)
62 struct aa_profile *to_profile)
63{ 61{
64 struct task_struct *tracer; 62 struct task_struct *tracer;
65 struct aa_profile *tracerp = NULL; 63 struct aa_profile *tracerp = NULL;
66 int error = 0; 64 int error = 0;
67 65
68 rcu_read_lock(); 66 rcu_read_lock();
69 tracer = ptrace_parent(task); 67 tracer = ptrace_parent(current);
70 if (tracer) 68 if (tracer)
71 /* released below */ 69 /* released below */
72 tracerp = aa_get_task_profile(tracer); 70 tracerp = aa_get_task_profile(tracer);
@@ -75,7 +73,7 @@ static int may_change_ptraced_domain(struct task_struct *task,
75 if (!tracer || unconfined(tracerp)) 73 if (!tracer || unconfined(tracerp))
76 goto out; 74 goto out;
77 75
78 error = aa_may_ptrace(tracer, tracerp, to_profile, PTRACE_MODE_ATTACH); 76 error = aa_may_ptrace(tracerp, to_profile, PTRACE_MODE_ATTACH);
79 77
80out: 78out:
81 rcu_read_unlock(); 79 rcu_read_unlock();
@@ -477,7 +475,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
477 } 475 }
478 476
479 if (bprm->unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) { 477 if (bprm->unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) {
480 error = may_change_ptraced_domain(current, new_profile); 478 error = may_change_ptraced_domain(new_profile);
481 if (error) { 479 if (error) {
482 aa_put_profile(new_profile); 480 aa_put_profile(new_profile);
483 goto audit; 481 goto audit;
@@ -690,7 +688,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
690 } 688 }
691 } 689 }
692 690
693 error = may_change_ptraced_domain(current, hat); 691 error = may_change_ptraced_domain(hat);
694 if (error) { 692 if (error) {
695 info = "ptraced"; 693 info = "ptraced";
696 error = -EPERM; 694 error = -EPERM;
@@ -829,7 +827,7 @@ int aa_change_profile(const char *ns_name, const char *hname, bool onexec,
829 } 827 }
830 828
831 /* check if tracing task is allowed to trace target domain */ 829 /* check if tracing task is allowed to trace target domain */
832 error = may_change_ptraced_domain(current, target); 830 error = may_change_ptraced_domain(target);
833 if (error) { 831 if (error) {
834 info = "ptrace prevents transition"; 832 info = "ptrace prevents transition";
835 goto audit; 833 goto audit;
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
index 30e8d7687259..ba3dfd17f23f 100644
--- a/security/apparmor/include/audit.h
+++ b/security/apparmor/include/audit.h
@@ -109,7 +109,6 @@ struct apparmor_audit_data {
109 void *profile; 109 void *profile;
110 const char *name; 110 const char *name;
111 const char *info; 111 const char *info;
112 struct task_struct *tsk;
113 union { 112 union {
114 void *target; 113 void *target;
115 struct { 114 struct {
diff --git a/security/apparmor/include/capability.h b/security/apparmor/include/capability.h
index 2e7c9d6a2f3b..fc3fa381d850 100644
--- a/security/apparmor/include/capability.h
+++ b/security/apparmor/include/capability.h
@@ -4,7 +4,7 @@
4 * This file contains AppArmor capability mediation definitions. 4 * This file contains AppArmor capability mediation definitions.
5 * 5 *
6 * Copyright (C) 1998-2008 Novell/SUSE 6 * Copyright (C) 1998-2008 Novell/SUSE
7 * Copyright 2009-2010 Canonical Ltd. 7 * Copyright 2009-2013 Canonical Ltd.
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as 10 * modify it under the terms of the GNU General Public License as
@@ -38,8 +38,7 @@ struct aa_caps {
38 38
39extern struct aa_fs_entry aa_fs_entry_caps[]; 39extern struct aa_fs_entry aa_fs_entry_caps[];
40 40
41int aa_capable(struct task_struct *task, struct aa_profile *profile, int cap, 41int aa_capable(struct aa_profile *profile, int cap, int audit);
42 int audit);
43 42
44static inline void aa_free_cap_rules(struct aa_caps *caps) 43static inline void aa_free_cap_rules(struct aa_caps *caps)
45{ 44{
diff --git a/security/apparmor/include/ipc.h b/security/apparmor/include/ipc.h
index aeda0fbc8b2f..288ca76e2fb1 100644
--- a/security/apparmor/include/ipc.h
+++ b/security/apparmor/include/ipc.h
@@ -19,8 +19,8 @@
19 19
20struct aa_profile; 20struct aa_profile;
21 21
22int aa_may_ptrace(struct task_struct *tracer_task, struct aa_profile *tracer, 22int aa_may_ptrace(struct aa_profile *tracer, struct aa_profile *tracee,
23 struct aa_profile *tracee, unsigned int mode); 23 unsigned int mode);
24 24
25int aa_ptrace(struct task_struct *tracer, struct task_struct *tracee, 25int aa_ptrace(struct task_struct *tracer, struct task_struct *tracee,
26 unsigned int mode); 26 unsigned int mode);
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index c51d2266587e..777ac1c47253 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -54,15 +54,14 @@ static int aa_audit_ptrace(struct aa_profile *profile,
54 54
55/** 55/**
56 * aa_may_ptrace - test if tracer task can trace the tracee 56 * aa_may_ptrace - test if tracer task can trace the tracee
57 * @tracer_task: task who will do the tracing (NOT NULL)
58 * @tracer: profile of the task doing the tracing (NOT NULL) 57 * @tracer: profile of the task doing the tracing (NOT NULL)
59 * @tracee: task to be traced 58 * @tracee: task to be traced
60 * @mode: whether PTRACE_MODE_READ || PTRACE_MODE_ATTACH 59 * @mode: whether PTRACE_MODE_READ || PTRACE_MODE_ATTACH
61 * 60 *
62 * Returns: %0 else error code if permission denied or error 61 * Returns: %0 else error code if permission denied or error
63 */ 62 */
64int aa_may_ptrace(struct task_struct *tracer_task, struct aa_profile *tracer, 63int aa_may_ptrace(struct aa_profile *tracer, struct aa_profile *tracee,
65 struct aa_profile *tracee, unsigned int mode) 64 unsigned int mode)
66{ 65{
67 /* TODO: currently only based on capability, not extended ptrace 66 /* TODO: currently only based on capability, not extended ptrace
68 * rules, 67 * rules,
@@ -72,7 +71,7 @@ int aa_may_ptrace(struct task_struct *tracer_task, struct aa_profile *tracer,
72 if (unconfined(tracer) || tracer == tracee) 71 if (unconfined(tracer) || tracer == tracee)
73 return 0; 72 return 0;
74 /* log this capability request */ 73 /* log this capability request */
75 return aa_capable(tracer_task, tracer, CAP_SYS_PTRACE, 1); 74 return aa_capable(tracer, CAP_SYS_PTRACE, 1);
76} 75}
77 76
78/** 77/**
@@ -101,7 +100,7 @@ int aa_ptrace(struct task_struct *tracer, struct task_struct *tracee,
101 if (!unconfined(tracer_p)) { 100 if (!unconfined(tracer_p)) {
102 struct aa_profile *tracee_p = aa_get_task_profile(tracee); 101 struct aa_profile *tracee_p = aa_get_task_profile(tracee);
103 102
104 error = aa_may_ptrace(tracer, tracer_p, tracee_p, mode); 103 error = aa_may_ptrace(tracer_p, tracee_p, mode);
105 error = aa_audit_ptrace(tracer_p, tracee_p, error); 104 error = aa_audit_ptrace(tracer_p, tracee_p, error);
106 105
107 aa_put_profile(tracee_p); 106 aa_put_profile(tracee_p);
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index fb99e18123b4..4257b7e2796b 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -145,7 +145,7 @@ static int apparmor_capable(const struct cred *cred, struct user_namespace *ns,
145 if (!error) { 145 if (!error) {
146 profile = aa_cred_profile(cred); 146 profile = aa_cred_profile(cred);
147 if (!unconfined(profile)) 147 if (!unconfined(profile))
148 error = aa_capable(current, profile, cap, audit); 148 error = aa_capable(profile, cap, audit);
149 } 149 }
150 return error; 150 return error;
151} 151}
diff --git a/security/capability.c b/security/capability.c
index dbeb9bc27b24..8b4f24ae4338 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -777,9 +777,15 @@ static int cap_xfrm_policy_delete_security(struct xfrm_sec_ctx *ctx)
777 return 0; 777 return 0;
778} 778}
779 779
780static int cap_xfrm_state_alloc_security(struct xfrm_state *x, 780static int cap_xfrm_state_alloc(struct xfrm_state *x,
781 struct xfrm_user_sec_ctx *sec_ctx, 781 struct xfrm_user_sec_ctx *sec_ctx)
782 u32 secid) 782{
783 return 0;
784}
785
786static int cap_xfrm_state_alloc_acquire(struct xfrm_state *x,
787 struct xfrm_sec_ctx *polsec,
788 u32 secid)
783{ 789{
784 return 0; 790 return 0;
785} 791}
@@ -1101,7 +1107,8 @@ void __init security_fixup_ops(struct security_operations *ops)
1101 set_to_cap_if_null(ops, xfrm_policy_clone_security); 1107 set_to_cap_if_null(ops, xfrm_policy_clone_security);
1102 set_to_cap_if_null(ops, xfrm_policy_free_security); 1108 set_to_cap_if_null(ops, xfrm_policy_free_security);
1103 set_to_cap_if_null(ops, xfrm_policy_delete_security); 1109 set_to_cap_if_null(ops, xfrm_policy_delete_security);
1104 set_to_cap_if_null(ops, xfrm_state_alloc_security); 1110 set_to_cap_if_null(ops, xfrm_state_alloc);
1111 set_to_cap_if_null(ops, xfrm_state_alloc_acquire);
1105 set_to_cap_if_null(ops, xfrm_state_free_security); 1112 set_to_cap_if_null(ops, xfrm_state_free_security);
1106 set_to_cap_if_null(ops, xfrm_state_delete_security); 1113 set_to_cap_if_null(ops, xfrm_state_delete_security);
1107 set_to_cap_if_null(ops, xfrm_policy_lookup); 1114 set_to_cap_if_null(ops, xfrm_policy_lookup);
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 0b759e17a131..b4af4ebc5be2 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -28,7 +28,7 @@ static const char *keyring_name[INTEGRITY_KEYRING_MAX] = {
28}; 28};
29 29
30int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen, 30int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
31 const char *digest, int digestlen) 31 const char *digest, int digestlen)
32{ 32{
33 if (id >= INTEGRITY_KEYRING_MAX) 33 if (id >= INTEGRITY_KEYRING_MAX)
34 return -EINVAL; 34 return -EINVAL;
@@ -44,9 +44,10 @@ int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
44 } 44 }
45 } 45 }
46 46
47 switch (sig[0]) { 47 switch (sig[1]) {
48 case 1: 48 case 1:
49 return digsig_verify(keyring[id], sig, siglen, 49 /* v1 API expect signature without xattr type */
50 return digsig_verify(keyring[id], sig + 1, siglen - 1,
50 digest, digestlen); 51 digest, digestlen);
51 case 2: 52 case 2:
52 return asymmetric_verify(keyring[id], sig, siglen, 53 return asymmetric_verify(keyring[id], sig, siglen,
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
index b4754667659d..9eae4809006b 100644
--- a/security/integrity/digsig_asymmetric.c
+++ b/security/integrity/digsig_asymmetric.c
@@ -20,17 +20,6 @@
20#include "integrity.h" 20#include "integrity.h"
21 21
22/* 22/*
23 * signature format v2 - for using with asymmetric keys
24 */
25struct signature_v2_hdr {
26 uint8_t version; /* signature format version */
27 uint8_t hash_algo; /* Digest algorithm [enum pkey_hash_algo] */
28 uint32_t keyid; /* IMA key identifier - not X509/PGP specific*/
29 uint16_t sig_size; /* signature size */
30 uint8_t sig[0]; /* signature payload */
31} __packed;
32
33/*
34 * Request an asymmetric key. 23 * Request an asymmetric key.
35 */ 24 */
36static struct key *request_asymmetric_key(struct key *keyring, uint32_t keyid) 25static struct key *request_asymmetric_key(struct key *keyring, uint32_t keyid)
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index af9b6852f4e1..336b3ddfe63f 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -123,7 +123,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
123 goto out; 123 goto out;
124 } 124 }
125 125
126 xattr_len = rc - 1; 126 xattr_len = rc;
127 127
128 /* check value type */ 128 /* check value type */
129 switch (xattr_data->type) { 129 switch (xattr_data->type) {
@@ -143,7 +143,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
143 if (rc) 143 if (rc)
144 break; 144 break;
145 rc = integrity_digsig_verify(INTEGRITY_KEYRING_EVM, 145 rc = integrity_digsig_verify(INTEGRITY_KEYRING_EVM,
146 xattr_data->digest, xattr_len, 146 (const char *)xattr_data, xattr_len,
147 calc.digest, sizeof(calc.digest)); 147 calc.digest, sizeof(calc.digest));
148 if (!rc) { 148 if (!rc) {
149 /* we probably want to replace rsa with hmac here */ 149 /* we probably want to replace rsa with hmac here */
diff --git a/security/integrity/evm/evm_posix_acl.c b/security/integrity/evm/evm_posix_acl.c
index b1753e98bf9a..46408b9e62e8 100644
--- a/security/integrity/evm/evm_posix_acl.c
+++ b/security/integrity/evm/evm_posix_acl.c
@@ -11,8 +11,9 @@
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/xattr.h> 13#include <linux/xattr.h>
14#include <linux/evm.h>
14 15
15int posix_xattr_acl(char *xattr) 16int posix_xattr_acl(const char *xattr)
16{ 17{
17 int xattr_len = strlen(xattr); 18 int xattr_len = strlen(xattr);
18 19
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index 74522dbd10a6..c49d3f14cbec 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -70,6 +70,8 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
70 70
71static void iint_free(struct integrity_iint_cache *iint) 71static void iint_free(struct integrity_iint_cache *iint)
72{ 72{
73 kfree(iint->ima_hash);
74 iint->ima_hash = NULL;
73 iint->version = 0; 75 iint->version = 0;
74 iint->flags = 0UL; 76 iint->flags = 0UL;
75 iint->ima_file_status = INTEGRITY_UNKNOWN; 77 iint->ima_file_status = INTEGRITY_UNKNOWN;
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 39196abaff0d..81a27971d884 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -9,6 +9,7 @@ config IMA
9 select CRYPTO_HMAC 9 select CRYPTO_HMAC
10 select CRYPTO_MD5 10 select CRYPTO_MD5
11 select CRYPTO_SHA1 11 select CRYPTO_SHA1
12 select CRYPTO_HASH_INFO
12 select TCG_TPM if HAS_IOMEM && !UML 13 select TCG_TPM if HAS_IOMEM && !UML
13 select TCG_TIS if TCG_TPM && X86 14 select TCG_TIS if TCG_TPM && X86
14 select TCG_IBMVTPM if TCG_TPM && PPC64 15 select TCG_IBMVTPM if TCG_TPM && PPC64
@@ -45,6 +46,69 @@ config IMA_LSM_RULES
45 help 46 help
46 Disabling this option will disregard LSM based policy rules. 47 Disabling this option will disregard LSM based policy rules.
47 48
49choice
50 prompt "Default template"
51 default IMA_NG_TEMPLATE
52 depends on IMA
53 help
54 Select the default IMA measurement template.
55
56 The original 'ima' measurement list template contains a
57 hash, defined as 20 bytes, and a null terminated pathname,
58 limited to 255 characters. The 'ima-ng' measurement list
59 template permits both larger hash digests and longer
60 pathnames.
61
62 config IMA_TEMPLATE
63 bool "ima"
64 config IMA_NG_TEMPLATE
65 bool "ima-ng (default)"
66 config IMA_SIG_TEMPLATE
67 bool "ima-sig"
68endchoice
69
70config IMA_DEFAULT_TEMPLATE
71 string
72 depends on IMA
73 default "ima" if IMA_TEMPLATE
74 default "ima-ng" if IMA_NG_TEMPLATE
75 default "ima-sig" if IMA_SIG_TEMPLATE
76
77choice
78 prompt "Default integrity hash algorithm"
79 default IMA_DEFAULT_HASH_SHA1
80 depends on IMA
81 help
82 Select the default hash algorithm used for the measurement
83 list, integrity appraisal and audit log. The compiled default
84 hash algorithm can be overwritten using the kernel command
85 line 'ima_hash=' option.
86
87 config IMA_DEFAULT_HASH_SHA1
88 bool "SHA1 (default)"
89 depends on CRYPTO_SHA1
90
91 config IMA_DEFAULT_HASH_SHA256
92 bool "SHA256"
93 depends on CRYPTO_SHA256 && !IMA_TEMPLATE
94
95 config IMA_DEFAULT_HASH_SHA512
96 bool "SHA512"
97 depends on CRYPTO_SHA512 && !IMA_TEMPLATE
98
99 config IMA_DEFAULT_HASH_WP512
100 bool "WP512"
101 depends on CRYPTO_WP512 && !IMA_TEMPLATE
102endchoice
103
104config IMA_DEFAULT_HASH
105 string
106 depends on IMA
107 default "sha1" if IMA_DEFAULT_HASH_SHA1
108 default "sha256" if IMA_DEFAULT_HASH_SHA256
109 default "sha512" if IMA_DEFAULT_HASH_SHA512
110 default "wp512" if IMA_DEFAULT_HASH_WP512
111
48config IMA_APPRAISE 112config IMA_APPRAISE
49 bool "Appraise integrity measurements" 113 bool "Appraise integrity measurements"
50 depends on IMA 114 depends on IMA
diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile
index 56dfee7cbf61..d79263d2fdbf 100644
--- a/security/integrity/ima/Makefile
+++ b/security/integrity/ima/Makefile
@@ -6,5 +6,5 @@
6obj-$(CONFIG_IMA) += ima.o 6obj-$(CONFIG_IMA) += ima.o
7 7
8ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \ 8ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
9 ima_policy.o 9 ima_policy.o ima_template.o ima_template_lib.o
10ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o 10ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index b3dd616560f7..0356e1d437ca 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -26,7 +26,8 @@
26 26
27#include "../integrity.h" 27#include "../integrity.h"
28 28
29enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_ASCII }; 29enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
30 IMA_SHOW_ASCII };
30enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; 31enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
31 32
32/* digest size for IMA, fits SHA1 or MD5 */ 33/* digest size for IMA, fits SHA1 or MD5 */
@@ -36,23 +37,48 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
36#define IMA_HASH_BITS 9 37#define IMA_HASH_BITS 9
37#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) 38#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
38 39
40#define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16
41#define IMA_TEMPLATE_NUM_FIELDS_MAX 15
42
43#define IMA_TEMPLATE_IMA_NAME "ima"
44#define IMA_TEMPLATE_IMA_FMT "d|n"
45
39/* set during initialization */ 46/* set during initialization */
40extern int ima_initialized; 47extern int ima_initialized;
41extern int ima_used_chip; 48extern int ima_used_chip;
42extern char *ima_hash; 49extern int ima_hash_algo;
43extern int ima_appraise; 50extern int ima_appraise;
44 51
45/* IMA inode template definition */ 52/* IMA template field data definition */
46struct ima_template_data { 53struct ima_field_data {
47 u8 digest[IMA_DIGEST_SIZE]; /* sha1/md5 measurement hash */ 54 u8 *data;
48 char file_name[IMA_EVENT_NAME_LEN_MAX + 1]; /* name + \0 */ 55 u32 len;
56};
57
58/* IMA template field definition */
59struct ima_template_field {
60 const char field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN];
61 int (*field_init) (struct integrity_iint_cache *iint, struct file *file,
62 const unsigned char *filename,
63 struct evm_ima_xattr_data *xattr_value,
64 int xattr_len, struct ima_field_data *field_data);
65 void (*field_show) (struct seq_file *m, enum ima_show_type show,
66 struct ima_field_data *field_data);
67};
68
69/* IMA template descriptor definition */
70struct ima_template_desc {
71 char *name;
72 char *fmt;
73 int num_fields;
74 struct ima_template_field **fields;
49}; 75};
50 76
51struct ima_template_entry { 77struct ima_template_entry {
52 u8 digest[IMA_DIGEST_SIZE]; /* sha1 or md5 measurement hash */ 78 u8 digest[TPM_DIGEST_SIZE]; /* sha1 or md5 measurement hash */
53 const char *template_name; 79 struct ima_template_desc *template_desc; /* template descriptor */
54 int template_len; 80 u32 template_data_len;
55 struct ima_template_data template; 81 struct ima_field_data template_data[0]; /* template related data */
56}; 82};
57 83
58struct ima_queue_entry { 84struct ima_queue_entry {
@@ -69,13 +95,22 @@ int ima_fs_init(void);
69void ima_fs_cleanup(void); 95void ima_fs_cleanup(void);
70int ima_inode_alloc(struct inode *inode); 96int ima_inode_alloc(struct inode *inode);
71int ima_add_template_entry(struct ima_template_entry *entry, int violation, 97int ima_add_template_entry(struct ima_template_entry *entry, int violation,
72 const char *op, struct inode *inode); 98 const char *op, struct inode *inode,
73int ima_calc_file_hash(struct file *file, char *digest); 99 const unsigned char *filename);
74int ima_calc_buffer_hash(const void *data, int len, char *digest); 100int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash);
75int ima_calc_boot_aggregate(char *digest); 101int ima_calc_field_array_hash(struct ima_field_data *field_data,
76void ima_add_violation(struct inode *inode, const unsigned char *filename, 102 struct ima_template_desc *desc, int num_fields,
103 struct ima_digest_data *hash);
104int __init ima_calc_boot_aggregate(struct ima_digest_data *hash);
105void ima_add_violation(struct file *file, const unsigned char *filename,
77 const char *op, const char *cause); 106 const char *op, const char *cause);
78int ima_init_crypto(void); 107int ima_init_crypto(void);
108void ima_putc(struct seq_file *m, void *data, int datalen);
109void ima_print_digest(struct seq_file *m, u8 *digest, int size);
110struct ima_template_desc *ima_template_desc_current(void);
111int ima_init_template(void);
112
113int ima_init_template(void);
79 114
80/* 115/*
81 * used to protect h_table and sha_table 116 * used to protect h_table and sha_table
@@ -98,14 +133,22 @@ static inline unsigned long ima_hash_key(u8 *digest)
98int ima_get_action(struct inode *inode, int mask, int function); 133int ima_get_action(struct inode *inode, int mask, int function);
99int ima_must_measure(struct inode *inode, int mask, int function); 134int ima_must_measure(struct inode *inode, int mask, int function);
100int ima_collect_measurement(struct integrity_iint_cache *iint, 135int ima_collect_measurement(struct integrity_iint_cache *iint,
101 struct file *file); 136 struct file *file,
137 struct evm_ima_xattr_data **xattr_value,
138 int *xattr_len);
102void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file, 139void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file,
103 const unsigned char *filename); 140 const unsigned char *filename,
141 struct evm_ima_xattr_data *xattr_value,
142 int xattr_len);
104void ima_audit_measurement(struct integrity_iint_cache *iint, 143void ima_audit_measurement(struct integrity_iint_cache *iint,
105 const unsigned char *filename); 144 const unsigned char *filename);
145int ima_alloc_init_template(struct integrity_iint_cache *iint,
146 struct file *file, const unsigned char *filename,
147 struct evm_ima_xattr_data *xattr_value,
148 int xattr_len, struct ima_template_entry **entry);
106int ima_store_template(struct ima_template_entry *entry, int violation, 149int ima_store_template(struct ima_template_entry *entry, int violation,
107 struct inode *inode); 150 struct inode *inode, const unsigned char *filename);
108void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show); 151void ima_free_template_entry(struct ima_template_entry *entry);
109const char *ima_d_path(struct path *path, char **pathbuf); 152const char *ima_d_path(struct path *path, char **pathbuf);
110 153
111/* rbtree tree calls to lookup, insert, delete 154/* rbtree tree calls to lookup, insert, delete
@@ -131,17 +174,25 @@ void ima_delete_rules(void);
131 174
132#ifdef CONFIG_IMA_APPRAISE 175#ifdef CONFIG_IMA_APPRAISE
133int ima_appraise_measurement(int func, struct integrity_iint_cache *iint, 176int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
134 struct file *file, const unsigned char *filename); 177 struct file *file, const unsigned char *filename,
178 struct evm_ima_xattr_data *xattr_value,
179 int xattr_len);
135int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func); 180int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func);
136void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file); 181void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file);
137enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint, 182enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
138 int func); 183 int func);
184void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len,
185 struct ima_digest_data *hash);
186int ima_read_xattr(struct dentry *dentry,
187 struct evm_ima_xattr_data **xattr_value);
139 188
140#else 189#else
141static inline int ima_appraise_measurement(int func, 190static inline int ima_appraise_measurement(int func,
142 struct integrity_iint_cache *iint, 191 struct integrity_iint_cache *iint,
143 struct file *file, 192 struct file *file,
144 const unsigned char *filename) 193 const unsigned char *filename,
194 struct evm_ima_xattr_data *xattr_value,
195 int xattr_len)
145{ 196{
146 return INTEGRITY_UNKNOWN; 197 return INTEGRITY_UNKNOWN;
147} 198}
@@ -162,6 +213,19 @@ static inline enum integrity_status ima_get_cache_status(struct integrity_iint_c
162{ 213{
163 return INTEGRITY_UNKNOWN; 214 return INTEGRITY_UNKNOWN;
164} 215}
216
217static inline void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value,
218 int xattr_len,
219 struct ima_digest_data *hash)
220{
221}
222
223static inline int ima_read_xattr(struct dentry *dentry,
224 struct evm_ima_xattr_data **xattr_value)
225{
226 return 0;
227}
228
165#endif 229#endif
166 230
167/* LSM based policy rules require audit */ 231/* LSM based policy rules require audit */
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index 1c03e8f1e0e1..c38bbce8c6a6 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -18,9 +18,59 @@
18#include <linux/fs.h> 18#include <linux/fs.h>
19#include <linux/xattr.h> 19#include <linux/xattr.h>
20#include <linux/evm.h> 20#include <linux/evm.h>
21#include <crypto/hash_info.h>
21#include "ima.h" 22#include "ima.h"
22 23
23static const char *IMA_TEMPLATE_NAME = "ima"; 24/*
25 * ima_free_template_entry - free an existing template entry
26 */
27void ima_free_template_entry(struct ima_template_entry *entry)
28{
29 int i;
30
31 for (i = 0; i < entry->template_desc->num_fields; i++)
32 kfree(entry->template_data[i].data);
33
34 kfree(entry);
35}
36
37/*
38 * ima_alloc_init_template - create and initialize a new template entry
39 */
40int ima_alloc_init_template(struct integrity_iint_cache *iint,
41 struct file *file, const unsigned char *filename,
42 struct evm_ima_xattr_data *xattr_value,
43 int xattr_len, struct ima_template_entry **entry)
44{
45 struct ima_template_desc *template_desc = ima_template_desc_current();
46 int i, result = 0;
47
48 *entry = kzalloc(sizeof(**entry) + template_desc->num_fields *
49 sizeof(struct ima_field_data), GFP_NOFS);
50 if (!*entry)
51 return -ENOMEM;
52
53 (*entry)->template_desc = template_desc;
54 for (i = 0; i < template_desc->num_fields; i++) {
55 struct ima_template_field *field = template_desc->fields[i];
56 u32 len;
57
58 result = field->field_init(iint, file, filename,
59 xattr_value, xattr_len,
60 &((*entry)->template_data[i]));
61 if (result != 0)
62 goto out;
63
64 len = (*entry)->template_data[i].len;
65 (*entry)->template_data_len += sizeof(len);
66 (*entry)->template_data_len += len;
67 }
68 return 0;
69out:
70 ima_free_template_entry(*entry);
71 *entry = NULL;
72 return result;
73}
24 74
25/* 75/*
26 * ima_store_template - store ima template measurements 76 * ima_store_template - store ima template measurements
@@ -39,28 +89,35 @@ static const char *IMA_TEMPLATE_NAME = "ima";
39 * Returns 0 on success, error code otherwise 89 * Returns 0 on success, error code otherwise
40 */ 90 */
41int ima_store_template(struct ima_template_entry *entry, 91int ima_store_template(struct ima_template_entry *entry,
42 int violation, struct inode *inode) 92 int violation, struct inode *inode,
93 const unsigned char *filename)
43{ 94{
44 const char *op = "add_template_measure"; 95 const char *op = "add_template_measure";
45 const char *audit_cause = "hashing_error"; 96 const char *audit_cause = "hashing_error";
97 char *template_name = entry->template_desc->name;
46 int result; 98 int result;
47 99 struct {
48 memset(entry->digest, 0, sizeof(entry->digest)); 100 struct ima_digest_data hdr;
49 entry->template_name = IMA_TEMPLATE_NAME; 101 char digest[TPM_DIGEST_SIZE];
50 entry->template_len = sizeof(entry->template); 102 } hash;
51 103
52 if (!violation) { 104 if (!violation) {
53 result = ima_calc_buffer_hash(&entry->template, 105 int num_fields = entry->template_desc->num_fields;
54 entry->template_len, 106
55 entry->digest); 107 /* this function uses default algo */
108 hash.hdr.algo = HASH_ALGO_SHA1;
109 result = ima_calc_field_array_hash(&entry->template_data[0],
110 entry->template_desc,
111 num_fields, &hash.hdr);
56 if (result < 0) { 112 if (result < 0) {
57 integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, 113 integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
58 entry->template_name, op, 114 template_name, op,
59 audit_cause, result, 0); 115 audit_cause, result, 0);
60 return result; 116 return result;
61 } 117 }
118 memcpy(entry->digest, hash.hdr.digest, hash.hdr.length);
62 } 119 }
63 result = ima_add_template_entry(entry, violation, op, inode); 120 result = ima_add_template_entry(entry, violation, op, inode, filename);
64 return result; 121 return result;
65} 122}
66 123
@@ -71,26 +128,26 @@ int ima_store_template(struct ima_template_entry *entry,
71 * By extending the PCR with 0xFF's instead of with zeroes, the PCR 128 * By extending the PCR with 0xFF's instead of with zeroes, the PCR
72 * value is invalidated. 129 * value is invalidated.
73 */ 130 */
74void ima_add_violation(struct inode *inode, const unsigned char *filename, 131void ima_add_violation(struct file *file, const unsigned char *filename,
75 const char *op, const char *cause) 132 const char *op, const char *cause)
76{ 133{
77 struct ima_template_entry *entry; 134 struct ima_template_entry *entry;
135 struct inode *inode = file->f_dentry->d_inode;
78 int violation = 1; 136 int violation = 1;
79 int result; 137 int result;
80 138
81 /* can overflow, only indicator */ 139 /* can overflow, only indicator */
82 atomic_long_inc(&ima_htable.violations); 140 atomic_long_inc(&ima_htable.violations);
83 141
84 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 142 result = ima_alloc_init_template(NULL, file, filename,
85 if (!entry) { 143 NULL, 0, &entry);
144 if (result < 0) {
86 result = -ENOMEM; 145 result = -ENOMEM;
87 goto err_out; 146 goto err_out;
88 } 147 }
89 memset(&entry->template, 0, sizeof(entry->template)); 148 result = ima_store_template(entry, violation, inode, filename);
90 strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
91 result = ima_store_template(entry, violation, inode);
92 if (result < 0) 149 if (result < 0)
93 kfree(entry); 150 ima_free_template_entry(entry);
94err_out: 151err_out:
95 integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename, 152 integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
96 op, cause, result, 0); 153 op, cause, result, 0);
@@ -138,20 +195,42 @@ int ima_must_measure(struct inode *inode, int mask, int function)
138 * Return 0 on success, error code otherwise 195 * Return 0 on success, error code otherwise
139 */ 196 */
140int ima_collect_measurement(struct integrity_iint_cache *iint, 197int ima_collect_measurement(struct integrity_iint_cache *iint,
141 struct file *file) 198 struct file *file,
199 struct evm_ima_xattr_data **xattr_value,
200 int *xattr_len)
142{ 201{
143 struct inode *inode = file_inode(file); 202 struct inode *inode = file_inode(file);
144 const char *filename = file->f_dentry->d_name.name; 203 const char *filename = file->f_dentry->d_name.name;
145 int result = 0; 204 int result = 0;
205 struct {
206 struct ima_digest_data hdr;
207 char digest[IMA_MAX_DIGEST_SIZE];
208 } hash;
209
210 if (xattr_value)
211 *xattr_len = ima_read_xattr(file->f_dentry, xattr_value);
146 212
147 if (!(iint->flags & IMA_COLLECTED)) { 213 if (!(iint->flags & IMA_COLLECTED)) {
148 u64 i_version = file_inode(file)->i_version; 214 u64 i_version = file_inode(file)->i_version;
149 215
150 iint->ima_xattr.type = IMA_XATTR_DIGEST; 216 /* use default hash algorithm */
151 result = ima_calc_file_hash(file, iint->ima_xattr.digest); 217 hash.hdr.algo = ima_hash_algo;
218
219 if (xattr_value)
220 ima_get_hash_algo(*xattr_value, *xattr_len, &hash.hdr);
221
222 result = ima_calc_file_hash(file, &hash.hdr);
152 if (!result) { 223 if (!result) {
153 iint->version = i_version; 224 int length = sizeof(hash.hdr) + hash.hdr.length;
154 iint->flags |= IMA_COLLECTED; 225 void *tmpbuf = krealloc(iint->ima_hash, length,
226 GFP_NOFS);
227 if (tmpbuf) {
228 iint->ima_hash = tmpbuf;
229 memcpy(iint->ima_hash, &hash, length);
230 iint->version = i_version;
231 iint->flags |= IMA_COLLECTED;
232 } else
233 result = -ENOMEM;
155 } 234 }
156 } 235 }
157 if (result) 236 if (result)
@@ -177,7 +256,9 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
177 * Must be called with iint->mutex held. 256 * Must be called with iint->mutex held.
178 */ 257 */
179void ima_store_measurement(struct integrity_iint_cache *iint, 258void ima_store_measurement(struct integrity_iint_cache *iint,
180 struct file *file, const unsigned char *filename) 259 struct file *file, const unsigned char *filename,
260 struct evm_ima_xattr_data *xattr_value,
261 int xattr_len)
181{ 262{
182 const char *op = "add_template_measure"; 263 const char *op = "add_template_measure";
183 const char *audit_cause = "ENOMEM"; 264 const char *audit_cause = "ENOMEM";
@@ -189,37 +270,35 @@ void ima_store_measurement(struct integrity_iint_cache *iint,
189 if (iint->flags & IMA_MEASURED) 270 if (iint->flags & IMA_MEASURED)
190 return; 271 return;
191 272
192 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 273 result = ima_alloc_init_template(iint, file, filename,
193 if (!entry) { 274 xattr_value, xattr_len, &entry);
275 if (result < 0) {
194 integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename, 276 integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
195 op, audit_cause, result, 0); 277 op, audit_cause, result, 0);
196 return; 278 return;
197 } 279 }
198 memset(&entry->template, 0, sizeof(entry->template));
199 memcpy(entry->template.digest, iint->ima_xattr.digest, IMA_DIGEST_SIZE);
200 strcpy(entry->template.file_name,
201 (strlen(filename) > IMA_EVENT_NAME_LEN_MAX) ?
202 file->f_dentry->d_name.name : filename);
203 280
204 result = ima_store_template(entry, violation, inode); 281 result = ima_store_template(entry, violation, inode, filename);
205 if (!result || result == -EEXIST) 282 if (!result || result == -EEXIST)
206 iint->flags |= IMA_MEASURED; 283 iint->flags |= IMA_MEASURED;
207 if (result < 0) 284 if (result < 0)
208 kfree(entry); 285 ima_free_template_entry(entry);
209} 286}
210 287
211void ima_audit_measurement(struct integrity_iint_cache *iint, 288void ima_audit_measurement(struct integrity_iint_cache *iint,
212 const unsigned char *filename) 289 const unsigned char *filename)
213{ 290{
214 struct audit_buffer *ab; 291 struct audit_buffer *ab;
215 char hash[(IMA_DIGEST_SIZE * 2) + 1]; 292 char hash[(iint->ima_hash->length * 2) + 1];
293 const char *algo_name = hash_algo_name[iint->ima_hash->algo];
294 char algo_hash[sizeof(hash) + strlen(algo_name) + 2];
216 int i; 295 int i;
217 296
218 if (iint->flags & IMA_AUDITED) 297 if (iint->flags & IMA_AUDITED)
219 return; 298 return;
220 299
221 for (i = 0; i < IMA_DIGEST_SIZE; i++) 300 for (i = 0; i < iint->ima_hash->length; i++)
222 hex_byte_pack(hash + (i * 2), iint->ima_xattr.digest[i]); 301 hex_byte_pack(hash + (i * 2), iint->ima_hash->digest[i]);
223 hash[i * 2] = '\0'; 302 hash[i * 2] = '\0';
224 303
225 ab = audit_log_start(current->audit_context, GFP_KERNEL, 304 ab = audit_log_start(current->audit_context, GFP_KERNEL,
@@ -230,7 +309,8 @@ void ima_audit_measurement(struct integrity_iint_cache *iint,
230 audit_log_format(ab, "file="); 309 audit_log_format(ab, "file=");
231 audit_log_untrustedstring(ab, filename); 310 audit_log_untrustedstring(ab, filename);
232 audit_log_format(ab, " hash="); 311 audit_log_format(ab, " hash=");
233 audit_log_untrustedstring(ab, hash); 312 snprintf(algo_hash, sizeof(algo_hash), "%s:%s", algo_name, hash);
313 audit_log_untrustedstring(ab, algo_hash);
234 314
235 audit_log_task_info(ab, current); 315 audit_log_task_info(ab, current);
236 audit_log_end(ab); 316 audit_log_end(ab);
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 2d4becab8918..734e9468aca0 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -15,6 +15,7 @@
15#include <linux/magic.h> 15#include <linux/magic.h>
16#include <linux/ima.h> 16#include <linux/ima.h>
17#include <linux/evm.h> 17#include <linux/evm.h>
18#include <crypto/hash_info.h>
18 19
19#include "ima.h" 20#include "ima.h"
20 21
@@ -43,19 +44,31 @@ int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func)
43} 44}
44 45
45static int ima_fix_xattr(struct dentry *dentry, 46static int ima_fix_xattr(struct dentry *dentry,
46 struct integrity_iint_cache *iint) 47 struct integrity_iint_cache *iint)
47{ 48{
48 iint->ima_xattr.type = IMA_XATTR_DIGEST; 49 int rc, offset;
49 return __vfs_setxattr_noperm(dentry, XATTR_NAME_IMA, 50 u8 algo = iint->ima_hash->algo;
50 (u8 *)&iint->ima_xattr, 51
51 sizeof(iint->ima_xattr), 0); 52 if (algo <= HASH_ALGO_SHA1) {
53 offset = 1;
54 iint->ima_hash->xattr.sha1.type = IMA_XATTR_DIGEST;
55 } else {
56 offset = 0;
57 iint->ima_hash->xattr.ng.type = IMA_XATTR_DIGEST_NG;
58 iint->ima_hash->xattr.ng.algo = algo;
59 }
60 rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_IMA,
61 &iint->ima_hash->xattr.data[offset],
62 (sizeof(iint->ima_hash->xattr) - offset) +
63 iint->ima_hash->length, 0);
64 return rc;
52} 65}
53 66
54/* Return specific func appraised cached result */ 67/* Return specific func appraised cached result */
55enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint, 68enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
56 int func) 69 int func)
57{ 70{
58 switch(func) { 71 switch (func) {
59 case MMAP_CHECK: 72 case MMAP_CHECK:
60 return iint->ima_mmap_status; 73 return iint->ima_mmap_status;
61 case BPRM_CHECK: 74 case BPRM_CHECK:
@@ -71,7 +84,7 @@ enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
71static void ima_set_cache_status(struct integrity_iint_cache *iint, 84static void ima_set_cache_status(struct integrity_iint_cache *iint,
72 int func, enum integrity_status status) 85 int func, enum integrity_status status)
73{ 86{
74 switch(func) { 87 switch (func) {
75 case MMAP_CHECK: 88 case MMAP_CHECK:
76 iint->ima_mmap_status = status; 89 iint->ima_mmap_status = status;
77 break; 90 break;
@@ -90,7 +103,7 @@ static void ima_set_cache_status(struct integrity_iint_cache *iint,
90 103
91static void ima_cache_flags(struct integrity_iint_cache *iint, int func) 104static void ima_cache_flags(struct integrity_iint_cache *iint, int func)
92{ 105{
93 switch(func) { 106 switch (func) {
94 case MMAP_CHECK: 107 case MMAP_CHECK:
95 iint->flags |= (IMA_MMAP_APPRAISED | IMA_APPRAISED); 108 iint->flags |= (IMA_MMAP_APPRAISED | IMA_APPRAISED);
96 break; 109 break;
@@ -107,6 +120,50 @@ static void ima_cache_flags(struct integrity_iint_cache *iint, int func)
107 } 120 }
108} 121}
109 122
123void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len,
124 struct ima_digest_data *hash)
125{
126 struct signature_v2_hdr *sig;
127
128 if (!xattr_value || xattr_len < 2)
129 return;
130
131 switch (xattr_value->type) {
132 case EVM_IMA_XATTR_DIGSIG:
133 sig = (typeof(sig))xattr_value;
134 if (sig->version != 2 || xattr_len <= sizeof(*sig))
135 return;
136 hash->algo = sig->hash_algo;
137 break;
138 case IMA_XATTR_DIGEST_NG:
139 hash->algo = xattr_value->digest[0];
140 break;
141 case IMA_XATTR_DIGEST:
142 /* this is for backward compatibility */
143 if (xattr_len == 21) {
144 unsigned int zero = 0;
145 if (!memcmp(&xattr_value->digest[16], &zero, 4))
146 hash->algo = HASH_ALGO_MD5;
147 else
148 hash->algo = HASH_ALGO_SHA1;
149 } else if (xattr_len == 17)
150 hash->algo = HASH_ALGO_MD5;
151 break;
152 }
153}
154
155int ima_read_xattr(struct dentry *dentry,
156 struct evm_ima_xattr_data **xattr_value)
157{
158 struct inode *inode = dentry->d_inode;
159
160 if (!inode->i_op->getxattr)
161 return 0;
162
163 return vfs_getxattr_alloc(dentry, XATTR_NAME_IMA, (char **)xattr_value,
164 0, GFP_NOFS);
165}
166
110/* 167/*
111 * ima_appraise_measurement - appraise file measurement 168 * ima_appraise_measurement - appraise file measurement
112 * 169 *
@@ -116,23 +173,22 @@ static void ima_cache_flags(struct integrity_iint_cache *iint, int func)
116 * Return 0 on success, error code otherwise 173 * Return 0 on success, error code otherwise
117 */ 174 */
118int ima_appraise_measurement(int func, struct integrity_iint_cache *iint, 175int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
119 struct file *file, const unsigned char *filename) 176 struct file *file, const unsigned char *filename,
177 struct evm_ima_xattr_data *xattr_value,
178 int xattr_len)
120{ 179{
121 struct dentry *dentry = file->f_dentry; 180 struct dentry *dentry = file->f_dentry;
122 struct inode *inode = dentry->d_inode; 181 struct inode *inode = dentry->d_inode;
123 struct evm_ima_xattr_data *xattr_value = NULL;
124 enum integrity_status status = INTEGRITY_UNKNOWN; 182 enum integrity_status status = INTEGRITY_UNKNOWN;
125 const char *op = "appraise_data"; 183 const char *op = "appraise_data";
126 char *cause = "unknown"; 184 char *cause = "unknown";
127 int rc; 185 int rc = xattr_len, hash_start = 0;
128 186
129 if (!ima_appraise) 187 if (!ima_appraise)
130 return 0; 188 return 0;
131 if (!inode->i_op->getxattr) 189 if (!inode->i_op->getxattr)
132 return INTEGRITY_UNKNOWN; 190 return INTEGRITY_UNKNOWN;
133 191
134 rc = vfs_getxattr_alloc(dentry, XATTR_NAME_IMA, (char **)&xattr_value,
135 0, GFP_NOFS);
136 if (rc <= 0) { 192 if (rc <= 0) {
137 if (rc && rc != -ENODATA) 193 if (rc && rc != -ENODATA)
138 goto out; 194 goto out;
@@ -153,14 +209,25 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
153 goto out; 209 goto out;
154 } 210 }
155 switch (xattr_value->type) { 211 switch (xattr_value->type) {
212 case IMA_XATTR_DIGEST_NG:
213 /* first byte contains algorithm id */
214 hash_start = 1;
156 case IMA_XATTR_DIGEST: 215 case IMA_XATTR_DIGEST:
157 if (iint->flags & IMA_DIGSIG_REQUIRED) { 216 if (iint->flags & IMA_DIGSIG_REQUIRED) {
158 cause = "IMA signature required"; 217 cause = "IMA signature required";
159 status = INTEGRITY_FAIL; 218 status = INTEGRITY_FAIL;
160 break; 219 break;
161 } 220 }
162 rc = memcmp(xattr_value->digest, iint->ima_xattr.digest, 221 if (xattr_len - sizeof(xattr_value->type) - hash_start >=
163 IMA_DIGEST_SIZE); 222 iint->ima_hash->length)
223 /* xattr length may be longer. md5 hash in previous
224 version occupied 20 bytes in xattr, instead of 16
225 */
226 rc = memcmp(&xattr_value->digest[hash_start],
227 iint->ima_hash->digest,
228 iint->ima_hash->length);
229 else
230 rc = -EINVAL;
164 if (rc) { 231 if (rc) {
165 cause = "invalid-hash"; 232 cause = "invalid-hash";
166 status = INTEGRITY_FAIL; 233 status = INTEGRITY_FAIL;
@@ -171,9 +238,9 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
171 case EVM_IMA_XATTR_DIGSIG: 238 case EVM_IMA_XATTR_DIGSIG:
172 iint->flags |= IMA_DIGSIG; 239 iint->flags |= IMA_DIGSIG;
173 rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA, 240 rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA,
174 xattr_value->digest, rc - 1, 241 (const char *)xattr_value, rc,
175 iint->ima_xattr.digest, 242 iint->ima_hash->digest,
176 IMA_DIGEST_SIZE); 243 iint->ima_hash->length);
177 if (rc == -EOPNOTSUPP) { 244 if (rc == -EOPNOTSUPP) {
178 status = INTEGRITY_UNKNOWN; 245 status = INTEGRITY_UNKNOWN;
179 } else if (rc) { 246 } else if (rc) {
@@ -203,7 +270,6 @@ out:
203 ima_cache_flags(iint, func); 270 ima_cache_flags(iint, func);
204 } 271 }
205 ima_set_cache_status(iint, func, status); 272 ima_set_cache_status(iint, func, status);
206 kfree(xattr_value);
207 return status; 273 return status;
208} 274}
209 275
@@ -219,7 +285,7 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
219 if (iint->flags & IMA_DIGSIG) 285 if (iint->flags & IMA_DIGSIG)
220 return; 286 return;
221 287
222 rc = ima_collect_measurement(iint, file); 288 rc = ima_collect_measurement(iint, file, NULL, NULL);
223 if (rc < 0) 289 if (rc < 0)
224 return; 290 return;
225 291
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index a02e0791cf15..fdf60def52e9 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -20,6 +20,7 @@
20#include <linux/err.h> 20#include <linux/err.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <crypto/hash.h> 22#include <crypto/hash.h>
23#include <crypto/hash_info.h>
23#include "ima.h" 24#include "ima.h"
24 25
25static struct crypto_shash *ima_shash_tfm; 26static struct crypto_shash *ima_shash_tfm;
@@ -28,31 +29,58 @@ int ima_init_crypto(void)
28{ 29{
29 long rc; 30 long rc;
30 31
31 ima_shash_tfm = crypto_alloc_shash(ima_hash, 0, 0); 32 ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
32 if (IS_ERR(ima_shash_tfm)) { 33 if (IS_ERR(ima_shash_tfm)) {
33 rc = PTR_ERR(ima_shash_tfm); 34 rc = PTR_ERR(ima_shash_tfm);
34 pr_err("Can not allocate %s (reason: %ld)\n", ima_hash, rc); 35 pr_err("Can not allocate %s (reason: %ld)\n",
36 hash_algo_name[ima_hash_algo], rc);
35 return rc; 37 return rc;
36 } 38 }
37 return 0; 39 return 0;
38} 40}
39 41
42static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
43{
44 struct crypto_shash *tfm = ima_shash_tfm;
45 int rc;
46
47 if (algo != ima_hash_algo && algo < HASH_ALGO__LAST) {
48 tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
49 if (IS_ERR(tfm)) {
50 rc = PTR_ERR(tfm);
51 pr_err("Can not allocate %s (reason: %d)\n",
52 hash_algo_name[algo], rc);
53 }
54 }
55 return tfm;
56}
57
58static void ima_free_tfm(struct crypto_shash *tfm)
59{
60 if (tfm != ima_shash_tfm)
61 crypto_free_shash(tfm);
62}
63
40/* 64/*
41 * Calculate the MD5/SHA1 file digest 65 * Calculate the MD5/SHA1 file digest
42 */ 66 */
43int ima_calc_file_hash(struct file *file, char *digest) 67static int ima_calc_file_hash_tfm(struct file *file,
68 struct ima_digest_data *hash,
69 struct crypto_shash *tfm)
44{ 70{
45 loff_t i_size, offset = 0; 71 loff_t i_size, offset = 0;
46 char *rbuf; 72 char *rbuf;
47 int rc, read = 0; 73 int rc, read = 0;
48 struct { 74 struct {
49 struct shash_desc shash; 75 struct shash_desc shash;
50 char ctx[crypto_shash_descsize(ima_shash_tfm)]; 76 char ctx[crypto_shash_descsize(tfm)];
51 } desc; 77 } desc;
52 78
53 desc.shash.tfm = ima_shash_tfm; 79 desc.shash.tfm = tfm;
54 desc.shash.flags = 0; 80 desc.shash.flags = 0;
55 81
82 hash->length = crypto_shash_digestsize(tfm);
83
56 rc = crypto_shash_init(&desc.shash); 84 rc = crypto_shash_init(&desc.shash);
57 if (rc != 0) 85 if (rc != 0)
58 return rc; 86 return rc;
@@ -85,27 +113,90 @@ int ima_calc_file_hash(struct file *file, char *digest)
85 } 113 }
86 kfree(rbuf); 114 kfree(rbuf);
87 if (!rc) 115 if (!rc)
88 rc = crypto_shash_final(&desc.shash, digest); 116 rc = crypto_shash_final(&desc.shash, hash->digest);
89 if (read) 117 if (read)
90 file->f_mode &= ~FMODE_READ; 118 file->f_mode &= ~FMODE_READ;
91out: 119out:
92 return rc; 120 return rc;
93} 121}
94 122
123int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
124{
125 struct crypto_shash *tfm;
126 int rc;
127
128 tfm = ima_alloc_tfm(hash->algo);
129 if (IS_ERR(tfm))
130 return PTR_ERR(tfm);
131
132 rc = ima_calc_file_hash_tfm(file, hash, tfm);
133
134 ima_free_tfm(tfm);
135
136 return rc;
137}
138
95/* 139/*
96 * Calculate the hash of a given buffer 140 * Calculate the hash of template data
97 */ 141 */
98int ima_calc_buffer_hash(const void *data, int len, char *digest) 142static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
143 struct ima_template_desc *td,
144 int num_fields,
145 struct ima_digest_data *hash,
146 struct crypto_shash *tfm)
99{ 147{
100 struct { 148 struct {
101 struct shash_desc shash; 149 struct shash_desc shash;
102 char ctx[crypto_shash_descsize(ima_shash_tfm)]; 150 char ctx[crypto_shash_descsize(tfm)];
103 } desc; 151 } desc;
152 int rc, i;
104 153
105 desc.shash.tfm = ima_shash_tfm; 154 desc.shash.tfm = tfm;
106 desc.shash.flags = 0; 155 desc.shash.flags = 0;
107 156
108 return crypto_shash_digest(&desc.shash, data, len, digest); 157 hash->length = crypto_shash_digestsize(tfm);
158
159 rc = crypto_shash_init(&desc.shash);
160 if (rc != 0)
161 return rc;
162
163 for (i = 0; i < num_fields; i++) {
164 if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
165 rc = crypto_shash_update(&desc.shash,
166 (const u8 *) &field_data[i].len,
167 sizeof(field_data[i].len));
168 if (rc)
169 break;
170 }
171 rc = crypto_shash_update(&desc.shash, field_data[i].data,
172 field_data[i].len);
173 if (rc)
174 break;
175 }
176
177 if (!rc)
178 rc = crypto_shash_final(&desc.shash, hash->digest);
179
180 return rc;
181}
182
183int ima_calc_field_array_hash(struct ima_field_data *field_data,
184 struct ima_template_desc *desc, int num_fields,
185 struct ima_digest_data *hash)
186{
187 struct crypto_shash *tfm;
188 int rc;
189
190 tfm = ima_alloc_tfm(hash->algo);
191 if (IS_ERR(tfm))
192 return PTR_ERR(tfm);
193
194 rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields,
195 hash, tfm);
196
197 ima_free_tfm(tfm);
198
199 return rc;
109} 200}
110 201
111static void __init ima_pcrread(int idx, u8 *pcr) 202static void __init ima_pcrread(int idx, u8 *pcr)
@@ -120,16 +211,17 @@ static void __init ima_pcrread(int idx, u8 *pcr)
120/* 211/*
121 * Calculate the boot aggregate hash 212 * Calculate the boot aggregate hash
122 */ 213 */
123int __init ima_calc_boot_aggregate(char *digest) 214static int __init ima_calc_boot_aggregate_tfm(char *digest,
215 struct crypto_shash *tfm)
124{ 216{
125 u8 pcr_i[IMA_DIGEST_SIZE]; 217 u8 pcr_i[TPM_DIGEST_SIZE];
126 int rc, i; 218 int rc, i;
127 struct { 219 struct {
128 struct shash_desc shash; 220 struct shash_desc shash;
129 char ctx[crypto_shash_descsize(ima_shash_tfm)]; 221 char ctx[crypto_shash_descsize(tfm)];
130 } desc; 222 } desc;
131 223
132 desc.shash.tfm = ima_shash_tfm; 224 desc.shash.tfm = tfm;
133 desc.shash.flags = 0; 225 desc.shash.flags = 0;
134 226
135 rc = crypto_shash_init(&desc.shash); 227 rc = crypto_shash_init(&desc.shash);
@@ -140,9 +232,26 @@ int __init ima_calc_boot_aggregate(char *digest)
140 for (i = TPM_PCR0; i < TPM_PCR8; i++) { 232 for (i = TPM_PCR0; i < TPM_PCR8; i++) {
141 ima_pcrread(i, pcr_i); 233 ima_pcrread(i, pcr_i);
142 /* now accumulate with current aggregate */ 234 /* now accumulate with current aggregate */
143 rc = crypto_shash_update(&desc.shash, pcr_i, IMA_DIGEST_SIZE); 235 rc = crypto_shash_update(&desc.shash, pcr_i, TPM_DIGEST_SIZE);
144 } 236 }
145 if (!rc) 237 if (!rc)
146 crypto_shash_final(&desc.shash, digest); 238 crypto_shash_final(&desc.shash, digest);
147 return rc; 239 return rc;
148} 240}
241
242int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
243{
244 struct crypto_shash *tfm;
245 int rc;
246
247 tfm = ima_alloc_tfm(hash->algo);
248 if (IS_ERR(tfm))
249 return PTR_ERR(tfm);
250
251 hash->length = crypto_shash_digestsize(tfm);
252 rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm);
253
254 ima_free_tfm(tfm);
255
256 return rc;
257}
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index 38477c9c3415..db01125926bd 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -88,8 +88,7 @@ static void *ima_measurements_next(struct seq_file *m, void *v, loff_t *pos)
88 * against concurrent list-extension 88 * against concurrent list-extension
89 */ 89 */
90 rcu_read_lock(); 90 rcu_read_lock();
91 qe = list_entry_rcu(qe->later.next, 91 qe = list_entry_rcu(qe->later.next, struct ima_queue_entry, later);
92 struct ima_queue_entry, later);
93 rcu_read_unlock(); 92 rcu_read_unlock();
94 (*pos)++; 93 (*pos)++;
95 94
@@ -100,7 +99,7 @@ static void ima_measurements_stop(struct seq_file *m, void *v)
100{ 99{
101} 100}
102 101
103static void ima_putc(struct seq_file *m, void *data, int datalen) 102void ima_putc(struct seq_file *m, void *data, int datalen)
104{ 103{
105 while (datalen--) 104 while (datalen--)
106 seq_putc(m, *(char *)data++); 105 seq_putc(m, *(char *)data++);
@@ -111,6 +110,7 @@ static void ima_putc(struct seq_file *m, void *data, int datalen)
111 * char[20]=template digest 110 * char[20]=template digest
112 * 32bit-le=template name size 111 * 32bit-le=template name size
113 * char[n]=template name 112 * char[n]=template name
113 * [eventdata length]
114 * eventdata[n]=template specific data 114 * eventdata[n]=template specific data
115 */ 115 */
116static int ima_measurements_show(struct seq_file *m, void *v) 116static int ima_measurements_show(struct seq_file *m, void *v)
@@ -120,6 +120,8 @@ static int ima_measurements_show(struct seq_file *m, void *v)
120 struct ima_template_entry *e; 120 struct ima_template_entry *e;
121 int namelen; 121 int namelen;
122 u32 pcr = CONFIG_IMA_MEASURE_PCR_IDX; 122 u32 pcr = CONFIG_IMA_MEASURE_PCR_IDX;
123 bool is_ima_template = false;
124 int i;
123 125
124 /* get entry */ 126 /* get entry */
125 e = qe->entry; 127 e = qe->entry;
@@ -134,18 +136,32 @@ static int ima_measurements_show(struct seq_file *m, void *v)
134 ima_putc(m, &pcr, sizeof pcr); 136 ima_putc(m, &pcr, sizeof pcr);
135 137
136 /* 2nd: template digest */ 138 /* 2nd: template digest */
137 ima_putc(m, e->digest, IMA_DIGEST_SIZE); 139 ima_putc(m, e->digest, TPM_DIGEST_SIZE);
138 140
139 /* 3rd: template name size */ 141 /* 3rd: template name size */
140 namelen = strlen(e->template_name); 142 namelen = strlen(e->template_desc->name);
141 ima_putc(m, &namelen, sizeof namelen); 143 ima_putc(m, &namelen, sizeof namelen);
142 144
143 /* 4th: template name */ 145 /* 4th: template name */
144 ima_putc(m, (void *)e->template_name, namelen); 146 ima_putc(m, e->template_desc->name, namelen);
147
148 /* 5th: template length (except for 'ima' template) */
149 if (strcmp(e->template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0)
150 is_ima_template = true;
151
152 if (!is_ima_template)
153 ima_putc(m, &e->template_data_len,
154 sizeof(e->template_data_len));
155
156 /* 6th: template specific data */
157 for (i = 0; i < e->template_desc->num_fields; i++) {
158 enum ima_show_type show = IMA_SHOW_BINARY;
159 struct ima_template_field *field = e->template_desc->fields[i];
145 160
146 /* 5th: template specific data */ 161 if (is_ima_template && strcmp(field->field_id, "d") == 0)
147 ima_template_show(m, (struct ima_template_data *)&e->template, 162 show = IMA_SHOW_BINARY_NO_FIELD_LEN;
148 IMA_SHOW_BINARY); 163 field->field_show(m, show, &e->template_data[i]);
164 }
149 return 0; 165 return 0;
150} 166}
151 167
@@ -168,41 +184,21 @@ static const struct file_operations ima_measurements_ops = {
168 .release = seq_release, 184 .release = seq_release,
169}; 185};
170 186
171static void ima_print_digest(struct seq_file *m, u8 *digest) 187void ima_print_digest(struct seq_file *m, u8 *digest, int size)
172{ 188{
173 int i; 189 int i;
174 190
175 for (i = 0; i < IMA_DIGEST_SIZE; i++) 191 for (i = 0; i < size; i++)
176 seq_printf(m, "%02x", *(digest + i)); 192 seq_printf(m, "%02x", *(digest + i));
177} 193}
178 194
179void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show)
180{
181 struct ima_template_data *entry = e;
182 int namelen;
183
184 switch (show) {
185 case IMA_SHOW_ASCII:
186 ima_print_digest(m, entry->digest);
187 seq_printf(m, " %s\n", entry->file_name);
188 break;
189 case IMA_SHOW_BINARY:
190 ima_putc(m, entry->digest, IMA_DIGEST_SIZE);
191
192 namelen = strlen(entry->file_name);
193 ima_putc(m, &namelen, sizeof namelen);
194 ima_putc(m, entry->file_name, namelen);
195 default:
196 break;
197 }
198}
199
200/* print in ascii */ 195/* print in ascii */
201static int ima_ascii_measurements_show(struct seq_file *m, void *v) 196static int ima_ascii_measurements_show(struct seq_file *m, void *v)
202{ 197{
203 /* the list never shrinks, so we don't need a lock here */ 198 /* the list never shrinks, so we don't need a lock here */
204 struct ima_queue_entry *qe = v; 199 struct ima_queue_entry *qe = v;
205 struct ima_template_entry *e; 200 struct ima_template_entry *e;
201 int i;
206 202
207 /* get entry */ 203 /* get entry */
208 e = qe->entry; 204 e = qe->entry;
@@ -213,14 +209,21 @@ static int ima_ascii_measurements_show(struct seq_file *m, void *v)
213 seq_printf(m, "%2d ", CONFIG_IMA_MEASURE_PCR_IDX); 209 seq_printf(m, "%2d ", CONFIG_IMA_MEASURE_PCR_IDX);
214 210
215 /* 2nd: SHA1 template hash */ 211 /* 2nd: SHA1 template hash */
216 ima_print_digest(m, e->digest); 212 ima_print_digest(m, e->digest, TPM_DIGEST_SIZE);
217 213
218 /* 3th: template name */ 214 /* 3th: template name */
219 seq_printf(m, " %s ", e->template_name); 215 seq_printf(m, " %s", e->template_desc->name);
220 216
221 /* 4th: template specific data */ 217 /* 4th: template specific data */
222 ima_template_show(m, (struct ima_template_data *)&e->template, 218 for (i = 0; i < e->template_desc->num_fields; i++) {
223 IMA_SHOW_ASCII); 219 seq_puts(m, " ");
220 if (e->template_data[i].len == 0)
221 continue;
222
223 e->template_desc->fields[i]->field_show(m, IMA_SHOW_ASCII,
224 &e->template_data[i]);
225 }
226 seq_puts(m, "\n");
224 return 0; 227 return 0;
225} 228}
226 229
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 162ea723db3d..37122768554a 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -18,6 +18,7 @@
18#include <linux/scatterlist.h> 18#include <linux/scatterlist.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/err.h> 20#include <linux/err.h>
21#include <crypto/hash_info.h>
21#include "ima.h" 22#include "ima.h"
22 23
23/* name for boot aggregate entry */ 24/* name for boot aggregate entry */
@@ -42,30 +43,39 @@ int ima_used_chip;
42static void __init ima_add_boot_aggregate(void) 43static void __init ima_add_boot_aggregate(void)
43{ 44{
44 struct ima_template_entry *entry; 45 struct ima_template_entry *entry;
46 struct integrity_iint_cache tmp_iint, *iint = &tmp_iint;
45 const char *op = "add_boot_aggregate"; 47 const char *op = "add_boot_aggregate";
46 const char *audit_cause = "ENOMEM"; 48 const char *audit_cause = "ENOMEM";
47 int result = -ENOMEM; 49 int result = -ENOMEM;
48 int violation = 1; 50 int violation = 0;
51 struct {
52 struct ima_digest_data hdr;
53 char digest[TPM_DIGEST_SIZE];
54 } hash;
49 55
50 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 56 memset(iint, 0, sizeof(*iint));
51 if (!entry) 57 memset(&hash, 0, sizeof(hash));
52 goto err_out; 58 iint->ima_hash = &hash.hdr;
59 iint->ima_hash->algo = HASH_ALGO_SHA1;
60 iint->ima_hash->length = SHA1_DIGEST_SIZE;
53 61
54 memset(&entry->template, 0, sizeof(entry->template));
55 strncpy(entry->template.file_name, boot_aggregate_name,
56 IMA_EVENT_NAME_LEN_MAX);
57 if (ima_used_chip) { 62 if (ima_used_chip) {
58 violation = 0; 63 result = ima_calc_boot_aggregate(&hash.hdr);
59 result = ima_calc_boot_aggregate(entry->template.digest);
60 if (result < 0) { 64 if (result < 0) {
61 audit_cause = "hashing_error"; 65 audit_cause = "hashing_error";
62 kfree(entry);
63 goto err_out; 66 goto err_out;
64 } 67 }
65 } 68 }
66 result = ima_store_template(entry, violation, NULL); 69
70 result = ima_alloc_init_template(iint, NULL, boot_aggregate_name,
71 NULL, 0, &entry);
72 if (result < 0)
73 return;
74
75 result = ima_store_template(entry, violation, NULL,
76 boot_aggregate_name);
67 if (result < 0) 77 if (result < 0)
68 kfree(entry); 78 ima_free_template_entry(entry);
69 return; 79 return;
70err_out: 80err_out:
71 integrity_audit_msg(AUDIT_INTEGRITY_PCR, NULL, boot_aggregate_name, op, 81 integrity_audit_msg(AUDIT_INTEGRITY_PCR, NULL, boot_aggregate_name, op,
@@ -74,7 +84,7 @@ err_out:
74 84
75int __init ima_init(void) 85int __init ima_init(void)
76{ 86{
77 u8 pcr_i[IMA_DIGEST_SIZE]; 87 u8 pcr_i[TPM_DIGEST_SIZE];
78 int rc; 88 int rc;
79 89
80 ima_used_chip = 0; 90 ima_used_chip = 0;
@@ -88,6 +98,10 @@ int __init ima_init(void)
88 rc = ima_init_crypto(); 98 rc = ima_init_crypto();
89 if (rc) 99 if (rc)
90 return rc; 100 return rc;
101 rc = ima_init_template();
102 if (rc != 0)
103 return rc;
104
91 ima_add_boot_aggregate(); /* boot aggregate must be first entry */ 105 ima_add_boot_aggregate(); /* boot aggregate must be first entry */
92 ima_init_policy(); 106 ima_init_policy();
93 107
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index e9508d5bbfcf..149ee1119f87 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -24,6 +24,7 @@
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/xattr.h> 25#include <linux/xattr.h>
26#include <linux/ima.h> 26#include <linux/ima.h>
27#include <crypto/hash_info.h>
27 28
28#include "ima.h" 29#include "ima.h"
29 30
@@ -35,11 +36,33 @@ int ima_appraise = IMA_APPRAISE_ENFORCE;
35int ima_appraise; 36int ima_appraise;
36#endif 37#endif
37 38
38char *ima_hash = "sha1"; 39int ima_hash_algo = HASH_ALGO_SHA1;
40static int hash_setup_done;
41
39static int __init hash_setup(char *str) 42static int __init hash_setup(char *str)
40{ 43{
41 if (strncmp(str, "md5", 3) == 0) 44 struct ima_template_desc *template_desc = ima_template_desc_current();
42 ima_hash = "md5"; 45 int i;
46
47 if (hash_setup_done)
48 return 1;
49
50 if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) {
51 if (strncmp(str, "sha1", 4) == 0)
52 ima_hash_algo = HASH_ALGO_SHA1;
53 else if (strncmp(str, "md5", 3) == 0)
54 ima_hash_algo = HASH_ALGO_MD5;
55 goto out;
56 }
57
58 for (i = 0; i < HASH_ALGO__LAST; i++) {
59 if (strcmp(str, hash_algo_name[i]) == 0) {
60 ima_hash_algo = i;
61 break;
62 }
63 }
64out:
65 hash_setup_done = 1;
43 return 1; 66 return 1;
44} 67}
45__setup("ima_hash=", hash_setup); 68__setup("ima_hash=", hash_setup);
@@ -92,10 +115,9 @@ out:
92 pathname = dentry->d_name.name; 115 pathname = dentry->d_name.name;
93 116
94 if (send_tomtou) 117 if (send_tomtou)
95 ima_add_violation(inode, pathname, 118 ima_add_violation(file, pathname, "invalid_pcr", "ToMToU");
96 "invalid_pcr", "ToMToU");
97 if (send_writers) 119 if (send_writers)
98 ima_add_violation(inode, pathname, 120 ima_add_violation(file, pathname,
99 "invalid_pcr", "open_writers"); 121 "invalid_pcr", "open_writers");
100 kfree(pathbuf); 122 kfree(pathbuf);
101} 123}
@@ -144,9 +166,12 @@ static int process_measurement(struct file *file, const char *filename,
144{ 166{
145 struct inode *inode = file_inode(file); 167 struct inode *inode = file_inode(file);
146 struct integrity_iint_cache *iint; 168 struct integrity_iint_cache *iint;
169 struct ima_template_desc *template_desc = ima_template_desc_current();
147 char *pathbuf = NULL; 170 char *pathbuf = NULL;
148 const char *pathname = NULL; 171 const char *pathname = NULL;
149 int rc = -ENOMEM, action, must_appraise, _func; 172 int rc = -ENOMEM, action, must_appraise, _func;
173 struct evm_ima_xattr_data *xattr_value = NULL, **xattr_ptr = NULL;
174 int xattr_len = 0;
150 175
151 if (!ima_initialized || !S_ISREG(inode->i_mode)) 176 if (!ima_initialized || !S_ISREG(inode->i_mode))
152 return 0; 177 return 0;
@@ -185,7 +210,13 @@ static int process_measurement(struct file *file, const char *filename,
185 goto out_digsig; 210 goto out_digsig;
186 } 211 }
187 212
188 rc = ima_collect_measurement(iint, file); 213 if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) {
214 if (action & IMA_APPRAISE_SUBMASK)
215 xattr_ptr = &xattr_value;
216 } else
217 xattr_ptr = &xattr_value;
218
219 rc = ima_collect_measurement(iint, file, xattr_ptr, &xattr_len);
189 if (rc != 0) 220 if (rc != 0)
190 goto out_digsig; 221 goto out_digsig;
191 222
@@ -194,9 +225,11 @@ static int process_measurement(struct file *file, const char *filename,
194 pathname = (const char *)file->f_dentry->d_name.name; 225 pathname = (const char *)file->f_dentry->d_name.name;
195 226
196 if (action & IMA_MEASURE) 227 if (action & IMA_MEASURE)
197 ima_store_measurement(iint, file, pathname); 228 ima_store_measurement(iint, file, pathname,
229 xattr_value, xattr_len);
198 if (action & IMA_APPRAISE_SUBMASK) 230 if (action & IMA_APPRAISE_SUBMASK)
199 rc = ima_appraise_measurement(_func, iint, file, pathname); 231 rc = ima_appraise_measurement(_func, iint, file, pathname,
232 xattr_value, xattr_len);
200 if (action & IMA_AUDIT) 233 if (action & IMA_AUDIT)
201 ima_audit_measurement(iint, pathname); 234 ima_audit_measurement(iint, pathname);
202 kfree(pathbuf); 235 kfree(pathbuf);
@@ -205,6 +238,7 @@ out_digsig:
205 rc = -EACCES; 238 rc = -EACCES;
206out: 239out:
207 mutex_unlock(&inode->i_mutex); 240 mutex_unlock(&inode->i_mutex);
241 kfree(xattr_value);
208 if ((rc && must_appraise) && (ima_appraise & IMA_APPRAISE_ENFORCE)) 242 if ((rc && must_appraise) && (ima_appraise & IMA_APPRAISE_ENFORCE))
209 return -EACCES; 243 return -EACCES;
210 return 0; 244 return 0;
@@ -244,9 +278,9 @@ int ima_file_mmap(struct file *file, unsigned long prot)
244int ima_bprm_check(struct linux_binprm *bprm) 278int ima_bprm_check(struct linux_binprm *bprm)
245{ 279{
246 return process_measurement(bprm->file, 280 return process_measurement(bprm->file,
247 (strcmp(bprm->filename, bprm->interp) == 0) ? 281 (strcmp(bprm->filename, bprm->interp) == 0) ?
248 bprm->filename : bprm->interp, 282 bprm->filename : bprm->interp,
249 MAY_EXEC, BPRM_CHECK); 283 MAY_EXEC, BPRM_CHECK);
250} 284}
251 285
252/** 286/**
@@ -263,8 +297,8 @@ int ima_file_check(struct file *file, int mask)
263{ 297{
264 ima_rdwr_violation_check(file); 298 ima_rdwr_violation_check(file);
265 return process_measurement(file, NULL, 299 return process_measurement(file, NULL,
266 mask & (MAY_READ | MAY_WRITE | MAY_EXEC), 300 mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
267 FILE_CHECK); 301 FILE_CHECK);
268} 302}
269EXPORT_SYMBOL_GPL(ima_file_check); 303EXPORT_SYMBOL_GPL(ima_file_check);
270 304
@@ -294,6 +328,7 @@ static int __init init_ima(void)
294{ 328{
295 int error; 329 int error;
296 330
331 hash_setup(CONFIG_IMA_DEFAULT_HASH);
297 error = ima_init(); 332 error = ima_init();
298 if (!error) 333 if (!error)
299 ima_initialized = 1; 334 ima_initialized = 1;
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 399433ad614e..a9c3d3cd1990 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -73,7 +73,6 @@ static struct ima_rule_entry default_rules[] = {
73 {.action = DONT_MEASURE,.fsmagic = SYSFS_MAGIC,.flags = IMA_FSMAGIC}, 73 {.action = DONT_MEASURE,.fsmagic = SYSFS_MAGIC,.flags = IMA_FSMAGIC},
74 {.action = DONT_MEASURE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC}, 74 {.action = DONT_MEASURE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC},
75 {.action = DONT_MEASURE,.fsmagic = TMPFS_MAGIC,.flags = IMA_FSMAGIC}, 75 {.action = DONT_MEASURE,.fsmagic = TMPFS_MAGIC,.flags = IMA_FSMAGIC},
76 {.action = DONT_MEASURE,.fsmagic = RAMFS_MAGIC,.flags = IMA_FSMAGIC},
77 {.action = DONT_MEASURE,.fsmagic = DEVPTS_SUPER_MAGIC,.flags = IMA_FSMAGIC}, 76 {.action = DONT_MEASURE,.fsmagic = DEVPTS_SUPER_MAGIC,.flags = IMA_FSMAGIC},
78 {.action = DONT_MEASURE,.fsmagic = BINFMTFS_MAGIC,.flags = IMA_FSMAGIC}, 77 {.action = DONT_MEASURE,.fsmagic = BINFMTFS_MAGIC,.flags = IMA_FSMAGIC},
79 {.action = DONT_MEASURE,.fsmagic = SECURITYFS_MAGIC,.flags = IMA_FSMAGIC}, 78 {.action = DONT_MEASURE,.fsmagic = SECURITYFS_MAGIC,.flags = IMA_FSMAGIC},
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index ff63fe00c195..d85e99761f4f 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -50,7 +50,7 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value)
50 key = ima_hash_key(digest_value); 50 key = ima_hash_key(digest_value);
51 rcu_read_lock(); 51 rcu_read_lock();
52 hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) { 52 hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
53 rc = memcmp(qe->entry->digest, digest_value, IMA_DIGEST_SIZE); 53 rc = memcmp(qe->entry->digest, digest_value, TPM_DIGEST_SIZE);
54 if (rc == 0) { 54 if (rc == 0) {
55 ret = qe; 55 ret = qe;
56 break; 56 break;
@@ -104,9 +104,10 @@ static int ima_pcr_extend(const u8 *hash)
104 * and extend the pcr. 104 * and extend the pcr.
105 */ 105 */
106int ima_add_template_entry(struct ima_template_entry *entry, int violation, 106int ima_add_template_entry(struct ima_template_entry *entry, int violation,
107 const char *op, struct inode *inode) 107 const char *op, struct inode *inode,
108 const unsigned char *filename)
108{ 109{
109 u8 digest[IMA_DIGEST_SIZE]; 110 u8 digest[TPM_DIGEST_SIZE];
110 const char *audit_cause = "hash_added"; 111 const char *audit_cause = "hash_added";
111 char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX]; 112 char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
112 int audit_info = 1; 113 int audit_info = 1;
@@ -141,8 +142,7 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
141 } 142 }
142out: 143out:
143 mutex_unlock(&ima_extend_list_mutex); 144 mutex_unlock(&ima_extend_list_mutex);
144 integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, 145 integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
145 entry->template.file_name,
146 op, audit_cause, result, audit_info); 146 op, audit_cause, result, audit_info);
147 return result; 147 return result;
148} 148}
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
new file mode 100644
index 000000000000..635695f6a185
--- /dev/null
+++ b/security/integrity/ima/ima_template.c
@@ -0,0 +1,187 @@
1/*
2 * Copyright (C) 2013 Politecnico di Torino, Italy
3 * TORSEC group -- http://security.polito.it
4 *
5 * Author: Roberto Sassu <roberto.sassu@polito.it>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation, version 2 of the
10 * License.
11 *
12 * File: ima_template.c
13 * Helpers to manage template descriptors.
14 */
15#include <crypto/hash_info.h>
16
17#include "ima.h"
18#include "ima_template_lib.h"
19
20static struct ima_template_desc defined_templates[] = {
21 {.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT},
22 {.name = "ima-ng",.fmt = "d-ng|n-ng"},
23 {.name = "ima-sig",.fmt = "d-ng|n-ng|sig"},
24};
25
26static struct ima_template_field supported_fields[] = {
27 {.field_id = "d",.field_init = ima_eventdigest_init,
28 .field_show = ima_show_template_digest},
29 {.field_id = "n",.field_init = ima_eventname_init,
30 .field_show = ima_show_template_string},
31 {.field_id = "d-ng",.field_init = ima_eventdigest_ng_init,
32 .field_show = ima_show_template_digest_ng},
33 {.field_id = "n-ng",.field_init = ima_eventname_ng_init,
34 .field_show = ima_show_template_string},
35 {.field_id = "sig",.field_init = ima_eventsig_init,
36 .field_show = ima_show_template_sig},
37};
38
39static struct ima_template_desc *ima_template;
40static struct ima_template_desc *lookup_template_desc(const char *name);
41
42static int __init ima_template_setup(char *str)
43{
44 struct ima_template_desc *template_desc;
45 int template_len = strlen(str);
46
47 /*
48 * Verify that a template with the supplied name exists.
49 * If not, use CONFIG_IMA_DEFAULT_TEMPLATE.
50 */
51 template_desc = lookup_template_desc(str);
52 if (!template_desc)
53 return 1;
54
55 /*
56 * Verify whether the current hash algorithm is supported
57 * by the 'ima' template.
58 */
59 if (template_len == 3 && strcmp(str, IMA_TEMPLATE_IMA_NAME) == 0 &&
60 ima_hash_algo != HASH_ALGO_SHA1 && ima_hash_algo != HASH_ALGO_MD5) {
61 pr_err("IMA: template does not support hash alg\n");
62 return 1;
63 }
64
65 ima_template = template_desc;
66 return 1;
67}
68__setup("ima_template=", ima_template_setup);
69
70static struct ima_template_desc *lookup_template_desc(const char *name)
71{
72 int i;
73
74 for (i = 0; i < ARRAY_SIZE(defined_templates); i++) {
75 if (strcmp(defined_templates[i].name, name) == 0)
76 return defined_templates + i;
77 }
78
79 return NULL;
80}
81
82static struct ima_template_field *lookup_template_field(const char *field_id)
83{
84 int i;
85
86 for (i = 0; i < ARRAY_SIZE(supported_fields); i++)
87 if (strncmp(supported_fields[i].field_id, field_id,
88 IMA_TEMPLATE_FIELD_ID_MAX_LEN) == 0)
89 return &supported_fields[i];
90 return NULL;
91}
92
93static int template_fmt_size(const char *template_fmt)
94{
95 char c;
96 int template_fmt_len = strlen(template_fmt);
97 int i = 0, j = 0;
98
99 while (i < template_fmt_len) {
100 c = template_fmt[i];
101 if (c == '|')
102 j++;
103 i++;
104 }
105
106 return j + 1;
107}
108
109static int template_desc_init_fields(const char *template_fmt,
110 struct ima_template_field ***fields,
111 int *num_fields)
112{
113 char *c, *template_fmt_copy, *template_fmt_ptr;
114 int template_num_fields = template_fmt_size(template_fmt);
115 int i, result = 0;
116
117 if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX)
118 return -EINVAL;
119
120 /* copying is needed as strsep() modifies the original buffer */
121 template_fmt_copy = kstrdup(template_fmt, GFP_KERNEL);
122 if (template_fmt_copy == NULL)
123 return -ENOMEM;
124
125 *fields = kzalloc(template_num_fields * sizeof(*fields), GFP_KERNEL);
126 if (*fields == NULL) {
127 result = -ENOMEM;
128 goto out;
129 }
130
131 template_fmt_ptr = template_fmt_copy;
132 for (i = 0; (c = strsep(&template_fmt_ptr, "|")) != NULL &&
133 i < template_num_fields; i++) {
134 struct ima_template_field *f = lookup_template_field(c);
135
136 if (!f) {
137 result = -ENOENT;
138 goto out;
139 }
140 (*fields)[i] = f;
141 }
142 *num_fields = i;
143out:
144 if (result < 0) {
145 kfree(*fields);
146 *fields = NULL;
147 }
148 kfree(template_fmt_copy);
149 return result;
150}
151
152static int init_defined_templates(void)
153{
154 int i = 0;
155 int result = 0;
156
157 /* Init defined templates. */
158 for (i = 0; i < ARRAY_SIZE(defined_templates); i++) {
159 struct ima_template_desc *template = &defined_templates[i];
160
161 result = template_desc_init_fields(template->fmt,
162 &(template->fields),
163 &(template->num_fields));
164 if (result < 0)
165 return result;
166 }
167 return result;
168}
169
170struct ima_template_desc *ima_template_desc_current(void)
171{
172 if (!ima_template)
173 ima_template =
174 lookup_template_desc(CONFIG_IMA_DEFAULT_TEMPLATE);
175 return ima_template;
176}
177
178int ima_init_template(void)
179{
180 int result;
181
182 result = init_defined_templates();
183 if (result < 0)
184 return result;
185
186 return 0;
187}
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
new file mode 100644
index 000000000000..c38adcc910fb
--- /dev/null
+++ b/security/integrity/ima/ima_template_lib.c
@@ -0,0 +1,351 @@
1/*
2 * Copyright (C) 2013 Politecnico di Torino, Italy
3 * TORSEC group -- http://security.polito.it
4 *
5 * Author: Roberto Sassu <roberto.sassu@polito.it>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation, version 2 of the
10 * License.
11 *
12 * File: ima_template_lib.c
13 * Library of supported template fields.
14 */
15#include <crypto/hash_info.h>
16
17#include "ima_template_lib.h"
18
19static bool ima_template_hash_algo_allowed(u8 algo)
20{
21 if (algo == HASH_ALGO_SHA1 || algo == HASH_ALGO_MD5)
22 return true;
23
24 return false;
25}
26
27enum data_formats {
28 DATA_FMT_DIGEST = 0,
29 DATA_FMT_DIGEST_WITH_ALGO,
30 DATA_FMT_EVENT_NAME,
31 DATA_FMT_STRING,
32 DATA_FMT_HEX
33};
34
35static int ima_write_template_field_data(const void *data, const u32 datalen,
36 enum data_formats datafmt,
37 struct ima_field_data *field_data)
38{
39 u8 *buf, *buf_ptr;
40 u32 buflen;
41
42 switch (datafmt) {
43 case DATA_FMT_EVENT_NAME:
44 buflen = IMA_EVENT_NAME_LEN_MAX + 1;
45 break;
46 case DATA_FMT_STRING:
47 buflen = datalen + 1;
48 break;
49 default:
50 buflen = datalen;
51 }
52
53 buf = kzalloc(buflen, GFP_KERNEL);
54 if (!buf)
55 return -ENOMEM;
56
57 memcpy(buf, data, datalen);
58
59 /*
60 * Replace all space characters with underscore for event names and
61 * strings. This avoid that, during the parsing of a measurements list,
62 * filenames with spaces or that end with the suffix ' (deleted)' are
63 * split into multiple template fields (the space is the delimitator
64 * character for measurements lists in ASCII format).
65 */
66 if (datafmt == DATA_FMT_EVENT_NAME || datafmt == DATA_FMT_STRING) {
67 for (buf_ptr = buf; buf_ptr - buf < datalen; buf_ptr++)
68 if (*buf_ptr == ' ')
69 *buf_ptr = '_';
70 }
71
72 field_data->data = buf;
73 field_data->len = buflen;
74 return 0;
75}
76
77static void ima_show_template_data_ascii(struct seq_file *m,
78 enum ima_show_type show,
79 enum data_formats datafmt,
80 struct ima_field_data *field_data)
81{
82 u8 *buf_ptr = field_data->data, buflen = field_data->len;
83
84 switch (datafmt) {
85 case DATA_FMT_DIGEST_WITH_ALGO:
86 buf_ptr = strnchr(field_data->data, buflen, ':');
87 if (buf_ptr != field_data->data)
88 seq_printf(m, "%s", field_data->data);
89
90 /* skip ':' and '\0' */
91 buf_ptr += 2;
92 buflen -= buf_ptr - field_data->data;
93 case DATA_FMT_DIGEST:
94 case DATA_FMT_HEX:
95 if (!buflen)
96 break;
97 ima_print_digest(m, buf_ptr, buflen);
98 break;
99 case DATA_FMT_STRING:
100 seq_printf(m, "%s", buf_ptr);
101 break;
102 default:
103 break;
104 }
105}
106
107static void ima_show_template_data_binary(struct seq_file *m,
108 enum ima_show_type show,
109 enum data_formats datafmt,
110 struct ima_field_data *field_data)
111{
112 if (show != IMA_SHOW_BINARY_NO_FIELD_LEN)
113 ima_putc(m, &field_data->len, sizeof(u32));
114
115 if (!field_data->len)
116 return;
117
118 ima_putc(m, field_data->data, field_data->len);
119}
120
121static void ima_show_template_field_data(struct seq_file *m,
122 enum ima_show_type show,
123 enum data_formats datafmt,
124 struct ima_field_data *field_data)
125{
126 switch (show) {
127 case IMA_SHOW_ASCII:
128 ima_show_template_data_ascii(m, show, datafmt, field_data);
129 break;
130 case IMA_SHOW_BINARY:
131 case IMA_SHOW_BINARY_NO_FIELD_LEN:
132 ima_show_template_data_binary(m, show, datafmt, field_data);
133 break;
134 default:
135 break;
136 }
137}
138
139void ima_show_template_digest(struct seq_file *m, enum ima_show_type show,
140 struct ima_field_data *field_data)
141{
142 ima_show_template_field_data(m, show, DATA_FMT_DIGEST, field_data);
143}
144
145void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show,
146 struct ima_field_data *field_data)
147{
148 ima_show_template_field_data(m, show, DATA_FMT_DIGEST_WITH_ALGO,
149 field_data);
150}
151
152void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
153 struct ima_field_data *field_data)
154{
155 ima_show_template_field_data(m, show, DATA_FMT_STRING, field_data);
156}
157
158void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
159 struct ima_field_data *field_data)
160{
161 ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data);
162}
163
164static int ima_eventdigest_init_common(u8 *digest, u32 digestsize, u8 hash_algo,
165 struct ima_field_data *field_data,
166 bool size_limit)
167{
168 /*
169 * digest formats:
170 * - DATA_FMT_DIGEST: digest
171 * - DATA_FMT_DIGEST_WITH_ALGO: [<hash algo>] + ':' + '\0' + digest,
172 * where <hash algo> is provided if the hash algoritm is not
173 * SHA1 or MD5
174 */
175 u8 buffer[CRYPTO_MAX_ALG_NAME + 2 + IMA_MAX_DIGEST_SIZE] = { 0 };
176 enum data_formats fmt = DATA_FMT_DIGEST;
177 u32 offset = 0;
178
179 if (!size_limit) {
180 fmt = DATA_FMT_DIGEST_WITH_ALGO;
181 if (hash_algo < HASH_ALGO__LAST)
182 offset += snprintf(buffer, CRYPTO_MAX_ALG_NAME + 1,
183 "%s", hash_algo_name[hash_algo]);
184 buffer[offset] = ':';
185 offset += 2;
186 }
187
188 if (digest)
189 memcpy(buffer + offset, digest, digestsize);
190 else
191 /*
192 * If digest is NULL, the event being recorded is a violation.
193 * Make room for the digest by increasing the offset of
194 * IMA_DIGEST_SIZE.
195 */
196 offset += IMA_DIGEST_SIZE;
197
198 return ima_write_template_field_data(buffer, offset + digestsize,
199 fmt, field_data);
200}
201
202/*
203 * This function writes the digest of an event (with size limit).
204 */
205int ima_eventdigest_init(struct integrity_iint_cache *iint, struct file *file,
206 const unsigned char *filename,
207 struct evm_ima_xattr_data *xattr_value, int xattr_len,
208 struct ima_field_data *field_data)
209{
210 struct {
211 struct ima_digest_data hdr;
212 char digest[IMA_MAX_DIGEST_SIZE];
213 } hash;
214 u8 *cur_digest = NULL;
215 u32 cur_digestsize = 0;
216 struct inode *inode;
217 int result;
218
219 memset(&hash, 0, sizeof(hash));
220
221 if (!iint) /* recording a violation. */
222 goto out;
223
224 if (ima_template_hash_algo_allowed(iint->ima_hash->algo)) {
225 cur_digest = iint->ima_hash->digest;
226 cur_digestsize = iint->ima_hash->length;
227 goto out;
228 }
229
230 if (!file) /* missing info to re-calculate the digest */
231 return -EINVAL;
232
233 inode = file_inode(file);
234 hash.hdr.algo = ima_template_hash_algo_allowed(ima_hash_algo) ?
235 ima_hash_algo : HASH_ALGO_SHA1;
236 result = ima_calc_file_hash(file, &hash.hdr);
237 if (result) {
238 integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
239 filename, "collect_data",
240 "failed", result, 0);
241 return result;
242 }
243 cur_digest = hash.hdr.digest;
244 cur_digestsize = hash.hdr.length;
245out:
246 return ima_eventdigest_init_common(cur_digest, cur_digestsize, -1,
247 field_data, true);
248}
249
250/*
251 * This function writes the digest of an event (without size limit).
252 */
253int ima_eventdigest_ng_init(struct integrity_iint_cache *iint,
254 struct file *file, const unsigned char *filename,
255 struct evm_ima_xattr_data *xattr_value,
256 int xattr_len, struct ima_field_data *field_data)
257{
258 u8 *cur_digest = NULL, hash_algo = HASH_ALGO__LAST;
259 u32 cur_digestsize = 0;
260
261 /* If iint is NULL, we are recording a violation. */
262 if (!iint)
263 goto out;
264
265 cur_digest = iint->ima_hash->digest;
266 cur_digestsize = iint->ima_hash->length;
267
268 hash_algo = iint->ima_hash->algo;
269out:
270 return ima_eventdigest_init_common(cur_digest, cur_digestsize,
271 hash_algo, field_data, false);
272}
273
274static int ima_eventname_init_common(struct integrity_iint_cache *iint,
275 struct file *file,
276 const unsigned char *filename,
277 struct ima_field_data *field_data,
278 bool size_limit)
279{
280 const char *cur_filename = NULL;
281 u32 cur_filename_len = 0;
282 enum data_formats fmt = size_limit ?
283 DATA_FMT_EVENT_NAME : DATA_FMT_STRING;
284
285 BUG_ON(filename == NULL && file == NULL);
286
287 if (filename) {
288 cur_filename = filename;
289 cur_filename_len = strlen(filename);
290
291 if (!size_limit || cur_filename_len <= IMA_EVENT_NAME_LEN_MAX)
292 goto out;
293 }
294
295 if (file) {
296 cur_filename = file->f_dentry->d_name.name;
297 cur_filename_len = strlen(cur_filename);
298 } else
299 /*
300 * Truncate filename if the latter is too long and
301 * the file descriptor is not available.
302 */
303 cur_filename_len = IMA_EVENT_NAME_LEN_MAX;
304out:
305 return ima_write_template_field_data(cur_filename, cur_filename_len,
306 fmt, field_data);
307}
308
309/*
310 * This function writes the name of an event (with size limit).
311 */
312int ima_eventname_init(struct integrity_iint_cache *iint, struct file *file,
313 const unsigned char *filename,
314 struct evm_ima_xattr_data *xattr_value, int xattr_len,
315 struct ima_field_data *field_data)
316{
317 return ima_eventname_init_common(iint, file, filename,
318 field_data, true);
319}
320
321/*
322 * This function writes the name of an event (without size limit).
323 */
324int ima_eventname_ng_init(struct integrity_iint_cache *iint, struct file *file,
325 const unsigned char *filename,
326 struct evm_ima_xattr_data *xattr_value, int xattr_len,
327 struct ima_field_data *field_data)
328{
329 return ima_eventname_init_common(iint, file, filename,
330 field_data, false);
331}
332
333/*
334 * ima_eventsig_init - include the file signature as part of the template data
335 */
336int ima_eventsig_init(struct integrity_iint_cache *iint, struct file *file,
337 const unsigned char *filename,
338 struct evm_ima_xattr_data *xattr_value, int xattr_len,
339 struct ima_field_data *field_data)
340{
341 enum data_formats fmt = DATA_FMT_HEX;
342 int rc = 0;
343
344 if ((!xattr_value) || (xattr_value->type != EVM_IMA_XATTR_DIGSIG))
345 goto out;
346
347 rc = ima_write_template_field_data(xattr_value, xattr_len, fmt,
348 field_data);
349out:
350 return rc;
351}
diff --git a/security/integrity/ima/ima_template_lib.h b/security/integrity/ima/ima_template_lib.h
new file mode 100644
index 000000000000..63f6b52cb1c2
--- /dev/null
+++ b/security/integrity/ima/ima_template_lib.h
@@ -0,0 +1,49 @@
1/*
2 * Copyright (C) 2013 Politecnico di Torino, Italy
3 * TORSEC group -- http://security.polito.it
4 *
5 * Author: Roberto Sassu <roberto.sassu@polito.it>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation, version 2 of the
10 * License.
11 *
12 * File: ima_template_lib.h
13 * Header for the library of supported template fields.
14 */
15#ifndef __LINUX_IMA_TEMPLATE_LIB_H
16#define __LINUX_IMA_TEMPLATE_LIB_H
17
18#include <linux/seq_file.h>
19#include "ima.h"
20
21void ima_show_template_digest(struct seq_file *m, enum ima_show_type show,
22 struct ima_field_data *field_data);
23void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show,
24 struct ima_field_data *field_data);
25void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
26 struct ima_field_data *field_data);
27void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
28 struct ima_field_data *field_data);
29int ima_eventdigest_init(struct integrity_iint_cache *iint, struct file *file,
30 const unsigned char *filename,
31 struct evm_ima_xattr_data *xattr_value, int xattr_len,
32 struct ima_field_data *field_data);
33int ima_eventname_init(struct integrity_iint_cache *iint, struct file *file,
34 const unsigned char *filename,
35 struct evm_ima_xattr_data *xattr_value, int xattr_len,
36 struct ima_field_data *field_data);
37int ima_eventdigest_ng_init(struct integrity_iint_cache *iint,
38 struct file *file, const unsigned char *filename,
39 struct evm_ima_xattr_data *xattr_value,
40 int xattr_len, struct ima_field_data *field_data);
41int ima_eventname_ng_init(struct integrity_iint_cache *iint, struct file *file,
42 const unsigned char *filename,
43 struct evm_ima_xattr_data *xattr_value, int xattr_len,
44 struct ima_field_data *field_data);
45int ima_eventsig_init(struct integrity_iint_cache *iint, struct file *file,
46 const unsigned char *filename,
47 struct evm_ima_xattr_data *xattr_value, int xattr_len,
48 struct ima_field_data *field_data);
49#endif /* __LINUX_IMA_TEMPLATE_LIB_H */
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index c42fb7a70dee..2fb5e53e927f 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -54,25 +54,57 @@ enum evm_ima_xattr_type {
54 IMA_XATTR_DIGEST = 0x01, 54 IMA_XATTR_DIGEST = 0x01,
55 EVM_XATTR_HMAC, 55 EVM_XATTR_HMAC,
56 EVM_IMA_XATTR_DIGSIG, 56 EVM_IMA_XATTR_DIGSIG,
57 IMA_XATTR_DIGEST_NG,
57}; 58};
58 59
59struct evm_ima_xattr_data { 60struct evm_ima_xattr_data {
60 u8 type; 61 u8 type;
61 u8 digest[SHA1_DIGEST_SIZE]; 62 u8 digest[SHA1_DIGEST_SIZE];
62} __attribute__((packed)); 63} __packed;
64
65#define IMA_MAX_DIGEST_SIZE 64
66
67struct ima_digest_data {
68 u8 algo;
69 u8 length;
70 union {
71 struct {
72 u8 unused;
73 u8 type;
74 } sha1;
75 struct {
76 u8 type;
77 u8 algo;
78 } ng;
79 u8 data[2];
80 } xattr;
81 u8 digest[0];
82} __packed;
83
84/*
85 * signature format v2 - for using with asymmetric keys
86 */
87struct signature_v2_hdr {
88 uint8_t type; /* xattr type */
89 uint8_t version; /* signature format version */
90 uint8_t hash_algo; /* Digest algorithm [enum pkey_hash_algo] */
91 uint32_t keyid; /* IMA key identifier - not X509/PGP specific */
92 uint16_t sig_size; /* signature size */
93 uint8_t sig[0]; /* signature payload */
94} __packed;
63 95
64/* integrity data associated with an inode */ 96/* integrity data associated with an inode */
65struct integrity_iint_cache { 97struct integrity_iint_cache {
66 struct rb_node rb_node; /* rooted in integrity_iint_tree */ 98 struct rb_node rb_node; /* rooted in integrity_iint_tree */
67 struct inode *inode; /* back pointer to inode in question */ 99 struct inode *inode; /* back pointer to inode in question */
68 u64 version; /* track inode changes */ 100 u64 version; /* track inode changes */
69 unsigned long flags; 101 unsigned long flags;
70 struct evm_ima_xattr_data ima_xattr;
71 enum integrity_status ima_file_status:4; 102 enum integrity_status ima_file_status:4;
72 enum integrity_status ima_mmap_status:4; 103 enum integrity_status ima_mmap_status:4;
73 enum integrity_status ima_bprm_status:4; 104 enum integrity_status ima_bprm_status:4;
74 enum integrity_status ima_module_status:4; 105 enum integrity_status ima_module_status:4;
75 enum integrity_status evm_status:4; 106 enum integrity_status evm_status:4;
107 struct ima_digest_data *ima_hash;
76}; 108};
77 109
78/* rbtree tree calls to lookup, insert, delete 110/* rbtree tree calls to lookup, insert, delete
@@ -89,7 +121,7 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
89#ifdef CONFIG_INTEGRITY_SIGNATURE 121#ifdef CONFIG_INTEGRITY_SIGNATURE
90 122
91int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen, 123int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
92 const char *digest, int digestlen); 124 const char *digest, int digestlen);
93 125
94#else 126#else
95 127
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index a90d6d300dbd..a4f3f8c48d6e 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -4,6 +4,7 @@
4 4
5config KEYS 5config KEYS
6 bool "Enable access key retention support" 6 bool "Enable access key retention support"
7 select ASSOCIATIVE_ARRAY
7 help 8 help
8 This option provides support for retaining authentication tokens and 9 This option provides support for retaining authentication tokens and
9 access keys in the kernel. 10 access keys in the kernel.
@@ -19,6 +20,34 @@ config KEYS
19 20
20 If you are unsure as to whether this is required, answer N. 21 If you are unsure as to whether this is required, answer N.
21 22
23config PERSISTENT_KEYRINGS
24 bool "Enable register of persistent per-UID keyrings"
25 depends on KEYS
26 help
27 This option provides a register of persistent per-UID keyrings,
28 primarily aimed at Kerberos key storage. The keyrings are persistent
29 in the sense that they stay around after all processes of that UID
30 have exited, not that they survive the machine being rebooted.
31
32 A particular keyring may be accessed by either the user whose keyring
33 it is or by a process with administrative privileges. The active
34 LSMs gets to rule on which admin-level processes get to access the
35 cache.
36
37 Keyrings are created and added into the register upon demand and get
38 removed if they expire (a default timeout is set upon creation).
39
40config BIG_KEYS
41 bool "Large payload keys"
42 depends on KEYS
43 depends on TMPFS
44 help
45 This option provides support for holding large keys within the kernel
46 (for example Kerberos ticket caches). The data may be stored out to
47 swapspace by tmpfs.
48
49 If you are unsure as to whether this is required, answer N.
50
22config TRUSTED_KEYS 51config TRUSTED_KEYS
23 tristate "TRUSTED KEYS" 52 tristate "TRUSTED KEYS"
24 depends on KEYS && TCG_TPM 53 depends on KEYS && TCG_TPM
diff --git a/security/keys/Makefile b/security/keys/Makefile
index 504aaa008388..dfb3a7bededf 100644
--- a/security/keys/Makefile
+++ b/security/keys/Makefile
@@ -18,9 +18,11 @@ obj-y := \
18obj-$(CONFIG_KEYS_COMPAT) += compat.o 18obj-$(CONFIG_KEYS_COMPAT) += compat.o
19obj-$(CONFIG_PROC_FS) += proc.o 19obj-$(CONFIG_PROC_FS) += proc.o
20obj-$(CONFIG_SYSCTL) += sysctl.o 20obj-$(CONFIG_SYSCTL) += sysctl.o
21obj-$(CONFIG_PERSISTENT_KEYRINGS) += persistent.o
21 22
22# 23#
23# Key types 24# Key types
24# 25#
26obj-$(CONFIG_BIG_KEYS) += big_key.o
25obj-$(CONFIG_TRUSTED_KEYS) += trusted.o 27obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
26obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys/ 28obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys/
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
new file mode 100644
index 000000000000..8137b27d641d
--- /dev/null
+++ b/security/keys/big_key.c
@@ -0,0 +1,207 @@
1/* Large capacity key type
2 *
3 * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/seq_file.h>
15#include <linux/file.h>
16#include <linux/shmem_fs.h>
17#include <linux/err.h>
18#include <keys/user-type.h>
19#include <keys/big_key-type.h>
20
21MODULE_LICENSE("GPL");
22
23/*
24 * If the data is under this limit, there's no point creating a shm file to
25 * hold it as the permanently resident metadata for the shmem fs will be at
26 * least as large as the data.
27 */
28#define BIG_KEY_FILE_THRESHOLD (sizeof(struct inode) + sizeof(struct dentry))
29
30/*
31 * big_key defined keys take an arbitrary string as the description and an
32 * arbitrary blob of data as the payload
33 */
34struct key_type key_type_big_key = {
35 .name = "big_key",
36 .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
37 .instantiate = big_key_instantiate,
38 .match = user_match,
39 .revoke = big_key_revoke,
40 .destroy = big_key_destroy,
41 .describe = big_key_describe,
42 .read = big_key_read,
43};
44
45/*
46 * Instantiate a big key
47 */
48int big_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
49{
50 struct path *path = (struct path *)&key->payload.data2;
51 struct file *file;
52 ssize_t written;
53 size_t datalen = prep->datalen;
54 int ret;
55
56 ret = -EINVAL;
57 if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
58 goto error;
59
60 /* Set an arbitrary quota */
61 ret = key_payload_reserve(key, 16);
62 if (ret < 0)
63 goto error;
64
65 key->type_data.x[1] = datalen;
66
67 if (datalen > BIG_KEY_FILE_THRESHOLD) {
68 /* Create a shmem file to store the data in. This will permit the data
69 * to be swapped out if needed.
70 *
71 * TODO: Encrypt the stored data with a temporary key.
72 */
73 file = shmem_kernel_file_setup("", datalen, 0);
74 if (IS_ERR(file)) {
75 ret = PTR_ERR(file);
76 goto err_quota;
77 }
78
79 written = kernel_write(file, prep->data, prep->datalen, 0);
80 if (written != datalen) {
81 ret = written;
82 if (written >= 0)
83 ret = -ENOMEM;
84 goto err_fput;
85 }
86
87 /* Pin the mount and dentry to the key so that we can open it again
88 * later
89 */
90 *path = file->f_path;
91 path_get(path);
92 fput(file);
93 } else {
94 /* Just store the data in a buffer */
95 void *data = kmalloc(datalen, GFP_KERNEL);
96 if (!data) {
97 ret = -ENOMEM;
98 goto err_quota;
99 }
100
101 key->payload.data = memcpy(data, prep->data, prep->datalen);
102 }
103 return 0;
104
105err_fput:
106 fput(file);
107err_quota:
108 key_payload_reserve(key, 0);
109error:
110 return ret;
111}
112
113/*
114 * dispose of the links from a revoked keyring
115 * - called with the key sem write-locked
116 */
117void big_key_revoke(struct key *key)
118{
119 struct path *path = (struct path *)&key->payload.data2;
120
121 /* clear the quota */
122 key_payload_reserve(key, 0);
123 if (key_is_instantiated(key) && key->type_data.x[1] > BIG_KEY_FILE_THRESHOLD)
124 vfs_truncate(path, 0);
125}
126
127/*
128 * dispose of the data dangling from the corpse of a big_key key
129 */
130void big_key_destroy(struct key *key)
131{
132 if (key->type_data.x[1] > BIG_KEY_FILE_THRESHOLD) {
133 struct path *path = (struct path *)&key->payload.data2;
134 path_put(path);
135 path->mnt = NULL;
136 path->dentry = NULL;
137 } else {
138 kfree(key->payload.data);
139 key->payload.data = NULL;
140 }
141}
142
143/*
144 * describe the big_key key
145 */
146void big_key_describe(const struct key *key, struct seq_file *m)
147{
148 unsigned long datalen = key->type_data.x[1];
149
150 seq_puts(m, key->description);
151
152 if (key_is_instantiated(key))
153 seq_printf(m, ": %lu [%s]",
154 datalen,
155 datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
156}
157
158/*
159 * read the key data
160 * - the key's semaphore is read-locked
161 */
162long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
163{
164 unsigned long datalen = key->type_data.x[1];
165 long ret;
166
167 if (!buffer || buflen < datalen)
168 return datalen;
169
170 if (datalen > BIG_KEY_FILE_THRESHOLD) {
171 struct path *path = (struct path *)&key->payload.data2;
172 struct file *file;
173 loff_t pos;
174
175 file = dentry_open(path, O_RDONLY, current_cred());
176 if (IS_ERR(file))
177 return PTR_ERR(file);
178
179 pos = 0;
180 ret = vfs_read(file, buffer, datalen, &pos);
181 fput(file);
182 if (ret >= 0 && ret != datalen)
183 ret = -EIO;
184 } else {
185 ret = datalen;
186 if (copy_to_user(buffer, key->payload.data, datalen) != 0)
187 ret = -EFAULT;
188 }
189
190 return ret;
191}
192
193/*
194 * Module stuff
195 */
196static int __init big_key_init(void)
197{
198 return register_key_type(&key_type_big_key);
199}
200
201static void __exit big_key_cleanup(void)
202{
203 unregister_key_type(&key_type_big_key);
204}
205
206module_init(big_key_init);
207module_exit(big_key_cleanup);
diff --git a/security/keys/compat.c b/security/keys/compat.c
index d65fa7fa29ba..bbd32c729dbb 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -138,6 +138,9 @@ asmlinkage long compat_sys_keyctl(u32 option,
138 case KEYCTL_INVALIDATE: 138 case KEYCTL_INVALIDATE:
139 return keyctl_invalidate_key(arg2); 139 return keyctl_invalidate_key(arg2);
140 140
141 case KEYCTL_GET_PERSISTENT:
142 return keyctl_get_persistent(arg2, arg3);
143
141 default: 144 default:
142 return -EOPNOTSUPP; 145 return -EOPNOTSUPP;
143 } 146 }
diff --git a/security/keys/gc.c b/security/keys/gc.c
index d67c97bb1025..d3222b6d7d59 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -131,50 +131,6 @@ void key_gc_keytype(struct key_type *ktype)
131} 131}
132 132
133/* 133/*
134 * Garbage collect pointers from a keyring.
135 *
136 * Not called with any locks held. The keyring's key struct will not be
137 * deallocated under us as only our caller may deallocate it.
138 */
139static void key_gc_keyring(struct key *keyring, time_t limit)
140{
141 struct keyring_list *klist;
142 int loop;
143
144 kenter("%x", key_serial(keyring));
145
146 if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
147 (1 << KEY_FLAG_REVOKED)))
148 goto dont_gc;
149
150 /* scan the keyring looking for dead keys */
151 rcu_read_lock();
152 klist = rcu_dereference(keyring->payload.subscriptions);
153 if (!klist)
154 goto unlock_dont_gc;
155
156 loop = klist->nkeys;
157 smp_rmb();
158 for (loop--; loop >= 0; loop--) {
159 struct key *key = rcu_dereference(klist->keys[loop]);
160 if (key_is_dead(key, limit))
161 goto do_gc;
162 }
163
164unlock_dont_gc:
165 rcu_read_unlock();
166dont_gc:
167 kleave(" [no gc]");
168 return;
169
170do_gc:
171 rcu_read_unlock();
172
173 keyring_gc(keyring, limit);
174 kleave(" [gc]");
175}
176
177/*
178 * Garbage collect a list of unreferenced, detached keys 134 * Garbage collect a list of unreferenced, detached keys
179 */ 135 */
180static noinline void key_gc_unused_keys(struct list_head *keys) 136static noinline void key_gc_unused_keys(struct list_head *keys)
@@ -392,8 +348,7 @@ found_unreferenced_key:
392 */ 348 */
393found_keyring: 349found_keyring:
394 spin_unlock(&key_serial_lock); 350 spin_unlock(&key_serial_lock);
395 kdebug("scan keyring %d", key->serial); 351 keyring_gc(key, limit);
396 key_gc_keyring(key, limit);
397 goto maybe_resched; 352 goto maybe_resched;
398 353
399 /* We found a dead key that is still referenced. Reset its type and 354 /* We found a dead key that is still referenced. Reset its type and
diff --git a/security/keys/internal.h b/security/keys/internal.h
index d4f1468b9b50..80b2aac4f50c 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -89,42 +89,53 @@ extern struct key_type *key_type_lookup(const char *type);
89extern void key_type_put(struct key_type *ktype); 89extern void key_type_put(struct key_type *ktype);
90 90
91extern int __key_link_begin(struct key *keyring, 91extern int __key_link_begin(struct key *keyring,
92 const struct key_type *type, 92 const struct keyring_index_key *index_key,
93 const char *description, 93 struct assoc_array_edit **_edit);
94 unsigned long *_prealloc);
95extern int __key_link_check_live_key(struct key *keyring, struct key *key); 94extern int __key_link_check_live_key(struct key *keyring, struct key *key);
96extern void __key_link(struct key *keyring, struct key *key, 95extern void __key_link(struct key *key, struct assoc_array_edit **_edit);
97 unsigned long *_prealloc);
98extern void __key_link_end(struct key *keyring, 96extern void __key_link_end(struct key *keyring,
99 struct key_type *type, 97 const struct keyring_index_key *index_key,
100 unsigned long prealloc); 98 struct assoc_array_edit *edit);
101 99
102extern key_ref_t __keyring_search_one(key_ref_t keyring_ref, 100extern key_ref_t find_key_to_update(key_ref_t keyring_ref,
103 const struct key_type *type, 101 const struct keyring_index_key *index_key);
104 const char *description,
105 key_perm_t perm);
106 102
107extern struct key *keyring_search_instkey(struct key *keyring, 103extern struct key *keyring_search_instkey(struct key *keyring,
108 key_serial_t target_id); 104 key_serial_t target_id);
109 105
106extern int iterate_over_keyring(const struct key *keyring,
107 int (*func)(const struct key *key, void *data),
108 void *data);
109
110typedef int (*key_match_func_t)(const struct key *, const void *); 110typedef int (*key_match_func_t)(const struct key *, const void *);
111 111
112struct keyring_search_context {
113 struct keyring_index_key index_key;
114 const struct cred *cred;
115 key_match_func_t match;
116 const void *match_data;
117 unsigned flags;
118#define KEYRING_SEARCH_LOOKUP_TYPE 0x0001 /* [as type->def_lookup_type] */
119#define KEYRING_SEARCH_NO_STATE_CHECK 0x0002 /* Skip state checks */
120#define KEYRING_SEARCH_DO_STATE_CHECK 0x0004 /* Override NO_STATE_CHECK */
121#define KEYRING_SEARCH_NO_UPDATE_TIME 0x0008 /* Don't update times */
122#define KEYRING_SEARCH_NO_CHECK_PERM 0x0010 /* Don't check permissions */
123#define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0020 /* Give an error on excessive depth */
124
125 int (*iterator)(const void *object, void *iterator_data);
126
127 /* Internal stuff */
128 int skipped_ret;
129 bool possessed;
130 key_ref_t result;
131 struct timespec now;
132};
133
112extern key_ref_t keyring_search_aux(key_ref_t keyring_ref, 134extern key_ref_t keyring_search_aux(key_ref_t keyring_ref,
113 const struct cred *cred, 135 struct keyring_search_context *ctx);
114 struct key_type *type, 136
115 const void *description, 137extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx);
116 key_match_func_t match, 138extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx);
117 bool no_state_check);
118
119extern key_ref_t search_my_process_keyrings(struct key_type *type,
120 const void *description,
121 key_match_func_t match,
122 bool no_state_check,
123 const struct cred *cred);
124extern key_ref_t search_process_keyrings(struct key_type *type,
125 const void *description,
126 key_match_func_t match,
127 const struct cred *cred);
128 139
129extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check); 140extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
130 141
@@ -202,7 +213,7 @@ extern struct key *key_get_instantiation_authkey(key_serial_t target_id);
202/* 213/*
203 * Determine whether a key is dead. 214 * Determine whether a key is dead.
204 */ 215 */
205static inline bool key_is_dead(struct key *key, time_t limit) 216static inline bool key_is_dead(const struct key *key, time_t limit)
206{ 217{
207 return 218 return
208 key->flags & ((1 << KEY_FLAG_DEAD) | 219 key->flags & ((1 << KEY_FLAG_DEAD) |
@@ -244,6 +255,15 @@ extern long keyctl_invalidate_key(key_serial_t);
244extern long keyctl_instantiate_key_common(key_serial_t, 255extern long keyctl_instantiate_key_common(key_serial_t,
245 const struct iovec *, 256 const struct iovec *,
246 unsigned, size_t, key_serial_t); 257 unsigned, size_t, key_serial_t);
258#ifdef CONFIG_PERSISTENT_KEYRINGS
259extern long keyctl_get_persistent(uid_t, key_serial_t);
260extern unsigned persistent_keyring_expiry;
261#else
262static inline long keyctl_get_persistent(uid_t uid, key_serial_t destring)
263{
264 return -EOPNOTSUPP;
265}
266#endif
247 267
248/* 268/*
249 * Debugging key validation 269 * Debugging key validation
diff --git a/security/keys/key.c b/security/keys/key.c
index 8fb7c7bd4657..6e21c11e48bc 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -242,8 +242,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
242 } 242 }
243 } 243 }
244 244
245 desclen = strlen(desc) + 1; 245 desclen = strlen(desc);
246 quotalen = desclen + type->def_datalen; 246 quotalen = desclen + 1 + type->def_datalen;
247 247
248 /* get hold of the key tracking for this user */ 248 /* get hold of the key tracking for this user */
249 user = key_user_lookup(uid); 249 user = key_user_lookup(uid);
@@ -272,12 +272,13 @@ struct key *key_alloc(struct key_type *type, const char *desc,
272 } 272 }
273 273
274 /* allocate and initialise the key and its description */ 274 /* allocate and initialise the key and its description */
275 key = kmem_cache_alloc(key_jar, GFP_KERNEL); 275 key = kmem_cache_zalloc(key_jar, GFP_KERNEL);
276 if (!key) 276 if (!key)
277 goto no_memory_2; 277 goto no_memory_2;
278 278
279 if (desc) { 279 if (desc) {
280 key->description = kmemdup(desc, desclen, GFP_KERNEL); 280 key->index_key.desc_len = desclen;
281 key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
281 if (!key->description) 282 if (!key->description)
282 goto no_memory_3; 283 goto no_memory_3;
283 } 284 }
@@ -285,22 +286,18 @@ struct key *key_alloc(struct key_type *type, const char *desc,
285 atomic_set(&key->usage, 1); 286 atomic_set(&key->usage, 1);
286 init_rwsem(&key->sem); 287 init_rwsem(&key->sem);
287 lockdep_set_class(&key->sem, &type->lock_class); 288 lockdep_set_class(&key->sem, &type->lock_class);
288 key->type = type; 289 key->index_key.type = type;
289 key->user = user; 290 key->user = user;
290 key->quotalen = quotalen; 291 key->quotalen = quotalen;
291 key->datalen = type->def_datalen; 292 key->datalen = type->def_datalen;
292 key->uid = uid; 293 key->uid = uid;
293 key->gid = gid; 294 key->gid = gid;
294 key->perm = perm; 295 key->perm = perm;
295 key->flags = 0;
296 key->expiry = 0;
297 key->payload.data = NULL;
298 key->security = NULL;
299 296
300 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) 297 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
301 key->flags |= 1 << KEY_FLAG_IN_QUOTA; 298 key->flags |= 1 << KEY_FLAG_IN_QUOTA;
302 299 if (flags & KEY_ALLOC_TRUSTED)
303 memset(&key->type_data, 0, sizeof(key->type_data)); 300 key->flags |= 1 << KEY_FLAG_TRUSTED;
304 301
305#ifdef KEY_DEBUGGING 302#ifdef KEY_DEBUGGING
306 key->magic = KEY_DEBUG_MAGIC; 303 key->magic = KEY_DEBUG_MAGIC;
@@ -408,7 +405,7 @@ static int __key_instantiate_and_link(struct key *key,
408 struct key_preparsed_payload *prep, 405 struct key_preparsed_payload *prep,
409 struct key *keyring, 406 struct key *keyring,
410 struct key *authkey, 407 struct key *authkey,
411 unsigned long *_prealloc) 408 struct assoc_array_edit **_edit)
412{ 409{
413 int ret, awaken; 410 int ret, awaken;
414 411
@@ -435,7 +432,7 @@ static int __key_instantiate_and_link(struct key *key,
435 432
436 /* and link it into the destination keyring */ 433 /* and link it into the destination keyring */
437 if (keyring) 434 if (keyring)
438 __key_link(keyring, key, _prealloc); 435 __key_link(key, _edit);
439 436
440 /* disable the authorisation key */ 437 /* disable the authorisation key */
441 if (authkey) 438 if (authkey)
@@ -475,7 +472,7 @@ int key_instantiate_and_link(struct key *key,
475 struct key *authkey) 472 struct key *authkey)
476{ 473{
477 struct key_preparsed_payload prep; 474 struct key_preparsed_payload prep;
478 unsigned long prealloc; 475 struct assoc_array_edit *edit;
479 int ret; 476 int ret;
480 477
481 memset(&prep, 0, sizeof(prep)); 478 memset(&prep, 0, sizeof(prep));
@@ -489,17 +486,15 @@ int key_instantiate_and_link(struct key *key,
489 } 486 }
490 487
491 if (keyring) { 488 if (keyring) {
492 ret = __key_link_begin(keyring, key->type, key->description, 489 ret = __key_link_begin(keyring, &key->index_key, &edit);
493 &prealloc);
494 if (ret < 0) 490 if (ret < 0)
495 goto error_free_preparse; 491 goto error_free_preparse;
496 } 492 }
497 493
498 ret = __key_instantiate_and_link(key, &prep, keyring, authkey, 494 ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit);
499 &prealloc);
500 495
501 if (keyring) 496 if (keyring)
502 __key_link_end(keyring, key->type, prealloc); 497 __key_link_end(keyring, &key->index_key, edit);
503 498
504error_free_preparse: 499error_free_preparse:
505 if (key->type->preparse) 500 if (key->type->preparse)
@@ -537,7 +532,7 @@ int key_reject_and_link(struct key *key,
537 struct key *keyring, 532 struct key *keyring,
538 struct key *authkey) 533 struct key *authkey)
539{ 534{
540 unsigned long prealloc; 535 struct assoc_array_edit *edit;
541 struct timespec now; 536 struct timespec now;
542 int ret, awaken, link_ret = 0; 537 int ret, awaken, link_ret = 0;
543 538
@@ -548,8 +543,7 @@ int key_reject_and_link(struct key *key,
548 ret = -EBUSY; 543 ret = -EBUSY;
549 544
550 if (keyring) 545 if (keyring)
551 link_ret = __key_link_begin(keyring, key->type, 546 link_ret = __key_link_begin(keyring, &key->index_key, &edit);
552 key->description, &prealloc);
553 547
554 mutex_lock(&key_construction_mutex); 548 mutex_lock(&key_construction_mutex);
555 549
@@ -557,9 +551,10 @@ int key_reject_and_link(struct key *key,
557 if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { 551 if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
558 /* mark the key as being negatively instantiated */ 552 /* mark the key as being negatively instantiated */
559 atomic_inc(&key->user->nikeys); 553 atomic_inc(&key->user->nikeys);
554 key->type_data.reject_error = -error;
555 smp_wmb();
560 set_bit(KEY_FLAG_NEGATIVE, &key->flags); 556 set_bit(KEY_FLAG_NEGATIVE, &key->flags);
561 set_bit(KEY_FLAG_INSTANTIATED, &key->flags); 557 set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
562 key->type_data.reject_error = -error;
563 now = current_kernel_time(); 558 now = current_kernel_time();
564 key->expiry = now.tv_sec + timeout; 559 key->expiry = now.tv_sec + timeout;
565 key_schedule_gc(key->expiry + key_gc_delay); 560 key_schedule_gc(key->expiry + key_gc_delay);
@@ -571,7 +566,7 @@ int key_reject_and_link(struct key *key,
571 566
572 /* and link it into the destination keyring */ 567 /* and link it into the destination keyring */
573 if (keyring && link_ret == 0) 568 if (keyring && link_ret == 0)
574 __key_link(keyring, key, &prealloc); 569 __key_link(key, &edit);
575 570
576 /* disable the authorisation key */ 571 /* disable the authorisation key */
577 if (authkey) 572 if (authkey)
@@ -581,7 +576,7 @@ int key_reject_and_link(struct key *key,
581 mutex_unlock(&key_construction_mutex); 576 mutex_unlock(&key_construction_mutex);
582 577
583 if (keyring) 578 if (keyring)
584 __key_link_end(keyring, key->type, prealloc); 579 __key_link_end(keyring, &key->index_key, edit);
585 580
586 /* wake up anyone waiting for a key to be constructed */ 581 /* wake up anyone waiting for a key to be constructed */
587 if (awaken) 582 if (awaken)
@@ -645,7 +640,7 @@ found:
645 /* this races with key_put(), but that doesn't matter since key_put() 640 /* this races with key_put(), but that doesn't matter since key_put()
646 * doesn't actually change the key 641 * doesn't actually change the key
647 */ 642 */
648 atomic_inc(&key->usage); 643 __key_get(key);
649 644
650error: 645error:
651 spin_unlock(&key_serial_lock); 646 spin_unlock(&key_serial_lock);
@@ -780,25 +775,27 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
780 key_perm_t perm, 775 key_perm_t perm,
781 unsigned long flags) 776 unsigned long flags)
782{ 777{
783 unsigned long prealloc; 778 struct keyring_index_key index_key = {
779 .description = description,
780 };
784 struct key_preparsed_payload prep; 781 struct key_preparsed_payload prep;
782 struct assoc_array_edit *edit;
785 const struct cred *cred = current_cred(); 783 const struct cred *cred = current_cred();
786 struct key_type *ktype;
787 struct key *keyring, *key = NULL; 784 struct key *keyring, *key = NULL;
788 key_ref_t key_ref; 785 key_ref_t key_ref;
789 int ret; 786 int ret;
790 787
791 /* look up the key type to see if it's one of the registered kernel 788 /* look up the key type to see if it's one of the registered kernel
792 * types */ 789 * types */
793 ktype = key_type_lookup(type); 790 index_key.type = key_type_lookup(type);
794 if (IS_ERR(ktype)) { 791 if (IS_ERR(index_key.type)) {
795 key_ref = ERR_PTR(-ENODEV); 792 key_ref = ERR_PTR(-ENODEV);
796 goto error; 793 goto error;
797 } 794 }
798 795
799 key_ref = ERR_PTR(-EINVAL); 796 key_ref = ERR_PTR(-EINVAL);
800 if (!ktype->match || !ktype->instantiate || 797 if (!index_key.type->match || !index_key.type->instantiate ||
801 (!description && !ktype->preparse)) 798 (!index_key.description && !index_key.type->preparse))
802 goto error_put_type; 799 goto error_put_type;
803 800
804 keyring = key_ref_to_ptr(keyring_ref); 801 keyring = key_ref_to_ptr(keyring_ref);
@@ -812,21 +809,28 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
812 memset(&prep, 0, sizeof(prep)); 809 memset(&prep, 0, sizeof(prep));
813 prep.data = payload; 810 prep.data = payload;
814 prep.datalen = plen; 811 prep.datalen = plen;
815 prep.quotalen = ktype->def_datalen; 812 prep.quotalen = index_key.type->def_datalen;
816 if (ktype->preparse) { 813 prep.trusted = flags & KEY_ALLOC_TRUSTED;
817 ret = ktype->preparse(&prep); 814 if (index_key.type->preparse) {
815 ret = index_key.type->preparse(&prep);
818 if (ret < 0) { 816 if (ret < 0) {
819 key_ref = ERR_PTR(ret); 817 key_ref = ERR_PTR(ret);
820 goto error_put_type; 818 goto error_put_type;
821 } 819 }
822 if (!description) 820 if (!index_key.description)
823 description = prep.description; 821 index_key.description = prep.description;
824 key_ref = ERR_PTR(-EINVAL); 822 key_ref = ERR_PTR(-EINVAL);
825 if (!description) 823 if (!index_key.description)
826 goto error_free_prep; 824 goto error_free_prep;
827 } 825 }
826 index_key.desc_len = strlen(index_key.description);
827
828 key_ref = ERR_PTR(-EPERM);
829 if (!prep.trusted && test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags))
830 goto error_free_prep;
831 flags |= prep.trusted ? KEY_ALLOC_TRUSTED : 0;
828 832
829 ret = __key_link_begin(keyring, ktype, description, &prealloc); 833 ret = __key_link_begin(keyring, &index_key, &edit);
830 if (ret < 0) { 834 if (ret < 0) {
831 key_ref = ERR_PTR(ret); 835 key_ref = ERR_PTR(ret);
832 goto error_free_prep; 836 goto error_free_prep;
@@ -844,10 +848,9 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
844 * key of the same type and description in the destination keyring and 848 * key of the same type and description in the destination keyring and
845 * update that instead if possible 849 * update that instead if possible
846 */ 850 */
847 if (ktype->update) { 851 if (index_key.type->update) {
848 key_ref = __keyring_search_one(keyring_ref, ktype, description, 852 key_ref = find_key_to_update(keyring_ref, &index_key);
849 0); 853 if (key_ref)
850 if (!IS_ERR(key_ref))
851 goto found_matching_key; 854 goto found_matching_key;
852 } 855 }
853 856
@@ -856,23 +859,24 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
856 perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; 859 perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
857 perm |= KEY_USR_VIEW; 860 perm |= KEY_USR_VIEW;
858 861
859 if (ktype->read) 862 if (index_key.type->read)
860 perm |= KEY_POS_READ; 863 perm |= KEY_POS_READ;
861 864
862 if (ktype == &key_type_keyring || ktype->update) 865 if (index_key.type == &key_type_keyring ||
866 index_key.type->update)
863 perm |= KEY_POS_WRITE; 867 perm |= KEY_POS_WRITE;
864 } 868 }
865 869
866 /* allocate a new key */ 870 /* allocate a new key */
867 key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred, 871 key = key_alloc(index_key.type, index_key.description,
868 perm, flags); 872 cred->fsuid, cred->fsgid, cred, perm, flags);
869 if (IS_ERR(key)) { 873 if (IS_ERR(key)) {
870 key_ref = ERR_CAST(key); 874 key_ref = ERR_CAST(key);
871 goto error_link_end; 875 goto error_link_end;
872 } 876 }
873 877
874 /* instantiate it and link it into the target keyring */ 878 /* instantiate it and link it into the target keyring */
875 ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &prealloc); 879 ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit);
876 if (ret < 0) { 880 if (ret < 0) {
877 key_put(key); 881 key_put(key);
878 key_ref = ERR_PTR(ret); 882 key_ref = ERR_PTR(ret);
@@ -882,12 +886,12 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
882 key_ref = make_key_ref(key, is_key_possessed(keyring_ref)); 886 key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
883 887
884error_link_end: 888error_link_end:
885 __key_link_end(keyring, ktype, prealloc); 889 __key_link_end(keyring, &index_key, edit);
886error_free_prep: 890error_free_prep:
887 if (ktype->preparse) 891 if (index_key.type->preparse)
888 ktype->free_preparse(&prep); 892 index_key.type->free_preparse(&prep);
889error_put_type: 893error_put_type:
890 key_type_put(ktype); 894 key_type_put(index_key.type);
891error: 895error:
892 return key_ref; 896 return key_ref;
893 897
@@ -895,7 +899,7 @@ error:
895 /* we found a matching key, so we're going to try to update it 899 /* we found a matching key, so we're going to try to update it
896 * - we can drop the locks first as we have the key pinned 900 * - we can drop the locks first as we have the key pinned
897 */ 901 */
898 __key_link_end(keyring, ktype, prealloc); 902 __key_link_end(keyring, &index_key, edit);
899 903
900 key_ref = __key_update(key_ref, &prep); 904 key_ref = __key_update(key_ref, &prep);
901 goto error_free_prep; 905 goto error_free_prep;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 33cfd27b4de2..cee72ce64222 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1667,6 +1667,9 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
1667 case KEYCTL_INVALIDATE: 1667 case KEYCTL_INVALIDATE:
1668 return keyctl_invalidate_key((key_serial_t) arg2); 1668 return keyctl_invalidate_key((key_serial_t) arg2);
1669 1669
1670 case KEYCTL_GET_PERSISTENT:
1671 return keyctl_get_persistent((uid_t)arg2, (key_serial_t)arg3);
1672
1670 default: 1673 default:
1671 return -EOPNOTSUPP; 1674 return -EOPNOTSUPP;
1672 } 1675 }
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 6ece7f2e5707..d46cbc5e335e 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1,6 +1,6 @@
1/* Keyring handling 1/* Keyring handling
2 * 2 *
3 * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
@@ -17,25 +17,11 @@
17#include <linux/seq_file.h> 17#include <linux/seq_file.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <keys/keyring-type.h> 19#include <keys/keyring-type.h>
20#include <keys/user-type.h>
21#include <linux/assoc_array_priv.h>
20#include <linux/uaccess.h> 22#include <linux/uaccess.h>
21#include "internal.h" 23#include "internal.h"
22 24
23#define rcu_dereference_locked_keyring(keyring) \
24 (rcu_dereference_protected( \
25 (keyring)->payload.subscriptions, \
26 rwsem_is_locked((struct rw_semaphore *)&(keyring)->sem)))
27
28#define rcu_deref_link_locked(klist, index, keyring) \
29 (rcu_dereference_protected( \
30 (klist)->keys[index], \
31 rwsem_is_locked((struct rw_semaphore *)&(keyring)->sem)))
32
33#define MAX_KEYRING_LINKS \
34 min_t(size_t, USHRT_MAX - 1, \
35 ((PAGE_SIZE - sizeof(struct keyring_list)) / sizeof(struct key *)))
36
37#define KEY_LINK_FIXQUOTA 1UL
38
39/* 25/*
40 * When plumbing the depths of the key tree, this sets a hard limit 26 * When plumbing the depths of the key tree, this sets a hard limit
41 * set on how deep we're willing to go. 27 * set on how deep we're willing to go.
@@ -47,6 +33,28 @@
47 */ 33 */
48#define KEYRING_NAME_HASH_SIZE (1 << 5) 34#define KEYRING_NAME_HASH_SIZE (1 << 5)
49 35
36/*
37 * We mark pointers we pass to the associative array with bit 1 set if
38 * they're keyrings and clear otherwise.
39 */
40#define KEYRING_PTR_SUBTYPE 0x2UL
41
42static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x)
43{
44 return (unsigned long)x & KEYRING_PTR_SUBTYPE;
45}
46static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x)
47{
48 void *object = assoc_array_ptr_to_leaf(x);
49 return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE);
50}
51static inline void *keyring_key_to_ptr(struct key *key)
52{
53 if (key->type == &key_type_keyring)
54 return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE);
55 return key;
56}
57
50static struct list_head keyring_name_hash[KEYRING_NAME_HASH_SIZE]; 58static struct list_head keyring_name_hash[KEYRING_NAME_HASH_SIZE];
51static DEFINE_RWLOCK(keyring_name_lock); 59static DEFINE_RWLOCK(keyring_name_lock);
52 60
@@ -67,7 +75,6 @@ static inline unsigned keyring_hash(const char *desc)
67 */ 75 */
68static int keyring_instantiate(struct key *keyring, 76static int keyring_instantiate(struct key *keyring,
69 struct key_preparsed_payload *prep); 77 struct key_preparsed_payload *prep);
70static int keyring_match(const struct key *keyring, const void *criterion);
71static void keyring_revoke(struct key *keyring); 78static void keyring_revoke(struct key *keyring);
72static void keyring_destroy(struct key *keyring); 79static void keyring_destroy(struct key *keyring);
73static void keyring_describe(const struct key *keyring, struct seq_file *m); 80static void keyring_describe(const struct key *keyring, struct seq_file *m);
@@ -76,9 +83,9 @@ static long keyring_read(const struct key *keyring,
76 83
77struct key_type key_type_keyring = { 84struct key_type key_type_keyring = {
78 .name = "keyring", 85 .name = "keyring",
79 .def_datalen = sizeof(struct keyring_list), 86 .def_datalen = 0,
80 .instantiate = keyring_instantiate, 87 .instantiate = keyring_instantiate,
81 .match = keyring_match, 88 .match = user_match,
82 .revoke = keyring_revoke, 89 .revoke = keyring_revoke,
83 .destroy = keyring_destroy, 90 .destroy = keyring_destroy,
84 .describe = keyring_describe, 91 .describe = keyring_describe,
@@ -127,6 +134,7 @@ static int keyring_instantiate(struct key *keyring,
127 134
128 ret = -EINVAL; 135 ret = -EINVAL;
129 if (prep->datalen == 0) { 136 if (prep->datalen == 0) {
137 assoc_array_init(&keyring->keys);
130 /* make the keyring available by name if it has one */ 138 /* make the keyring available by name if it has one */
131 keyring_publish_name(keyring); 139 keyring_publish_name(keyring);
132 ret = 0; 140 ret = 0;
@@ -136,15 +144,225 @@ static int keyring_instantiate(struct key *keyring,
136} 144}
137 145
138/* 146/*
139 * Match keyrings on their name 147 * Multiply 64-bits by 32-bits to 96-bits and fold back to 64-bit. Ideally we'd
148 * fold the carry back too, but that requires inline asm.
149 */
150static u64 mult_64x32_and_fold(u64 x, u32 y)
151{
152 u64 hi = (u64)(u32)(x >> 32) * y;
153 u64 lo = (u64)(u32)(x) * y;
154 return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32);
155}
156
157/*
158 * Hash a key type and description.
159 */
160static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key)
161{
162 const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP;
163 const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK;
164 const char *description = index_key->description;
165 unsigned long hash, type;
166 u32 piece;
167 u64 acc;
168 int n, desc_len = index_key->desc_len;
169
170 type = (unsigned long)index_key->type;
171
172 acc = mult_64x32_and_fold(type, desc_len + 13);
173 acc = mult_64x32_and_fold(acc, 9207);
174 for (;;) {
175 n = desc_len;
176 if (n <= 0)
177 break;
178 if (n > 4)
179 n = 4;
180 piece = 0;
181 memcpy(&piece, description, n);
182 description += n;
183 desc_len -= n;
184 acc = mult_64x32_and_fold(acc, piece);
185 acc = mult_64x32_and_fold(acc, 9207);
186 }
187
188 /* Fold the hash down to 32 bits if need be. */
189 hash = acc;
190 if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32)
191 hash ^= acc >> 32;
192
193 /* Squidge all the keyrings into a separate part of the tree to
194 * ordinary keys by making sure the lowest level segment in the hash is
195 * zero for keyrings and non-zero otherwise.
196 */
197 if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0)
198 return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1;
199 if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0)
200 return (hash + (hash << level_shift)) & ~fan_mask;
201 return hash;
202}
203
204/*
205 * Build the next index key chunk.
206 *
207 * On 32-bit systems the index key is laid out as:
208 *
209 * 0 4 5 9...
210 * hash desclen typeptr desc[]
211 *
212 * On 64-bit systems:
213 *
214 * 0 8 9 17...
215 * hash desclen typeptr desc[]
216 *
217 * We return it one word-sized chunk at a time.
140 */ 218 */
141static int keyring_match(const struct key *keyring, const void *description) 219static unsigned long keyring_get_key_chunk(const void *data, int level)
220{
221 const struct keyring_index_key *index_key = data;
222 unsigned long chunk = 0;
223 long offset = 0;
224 int desc_len = index_key->desc_len, n = sizeof(chunk);
225
226 level /= ASSOC_ARRAY_KEY_CHUNK_SIZE;
227 switch (level) {
228 case 0:
229 return hash_key_type_and_desc(index_key);
230 case 1:
231 return ((unsigned long)index_key->type << 8) | desc_len;
232 case 2:
233 if (desc_len == 0)
234 return (u8)((unsigned long)index_key->type >>
235 (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
236 n--;
237 offset = 1;
238 default:
239 offset += sizeof(chunk) - 1;
240 offset += (level - 3) * sizeof(chunk);
241 if (offset >= desc_len)
242 return 0;
243 desc_len -= offset;
244 if (desc_len > n)
245 desc_len = n;
246 offset += desc_len;
247 do {
248 chunk <<= 8;
249 chunk |= ((u8*)index_key->description)[--offset];
250 } while (--desc_len > 0);
251
252 if (level == 2) {
253 chunk <<= 8;
254 chunk |= (u8)((unsigned long)index_key->type >>
255 (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
256 }
257 return chunk;
258 }
259}
260
261static unsigned long keyring_get_object_key_chunk(const void *object, int level)
262{
263 const struct key *key = keyring_ptr_to_key(object);
264 return keyring_get_key_chunk(&key->index_key, level);
265}
266
267static bool keyring_compare_object(const void *object, const void *data)
142{ 268{
143 return keyring->description && 269 const struct keyring_index_key *index_key = data;
144 strcmp(keyring->description, description) == 0; 270 const struct key *key = keyring_ptr_to_key(object);
271
272 return key->index_key.type == index_key->type &&
273 key->index_key.desc_len == index_key->desc_len &&
274 memcmp(key->index_key.description, index_key->description,
275 index_key->desc_len) == 0;
145} 276}
146 277
147/* 278/*
279 * Compare the index keys of a pair of objects and determine the bit position
280 * at which they differ - if they differ.
281 */
282static int keyring_diff_objects(const void *object, const void *data)
283{
284 const struct key *key_a = keyring_ptr_to_key(object);
285 const struct keyring_index_key *a = &key_a->index_key;
286 const struct keyring_index_key *b = data;
287 unsigned long seg_a, seg_b;
288 int level, i;
289
290 level = 0;
291 seg_a = hash_key_type_and_desc(a);
292 seg_b = hash_key_type_and_desc(b);
293 if ((seg_a ^ seg_b) != 0)
294 goto differ;
295
296 /* The number of bits contributed by the hash is controlled by a
297 * constant in the assoc_array headers. Everything else thereafter we
298 * can deal with as being machine word-size dependent.
299 */
300 level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8;
301 seg_a = a->desc_len;
302 seg_b = b->desc_len;
303 if ((seg_a ^ seg_b) != 0)
304 goto differ;
305
306 /* The next bit may not work on big endian */
307 level++;
308 seg_a = (unsigned long)a->type;
309 seg_b = (unsigned long)b->type;
310 if ((seg_a ^ seg_b) != 0)
311 goto differ;
312
313 level += sizeof(unsigned long);
314 if (a->desc_len == 0)
315 goto same;
316
317 i = 0;
318 if (((unsigned long)a->description | (unsigned long)b->description) &
319 (sizeof(unsigned long) - 1)) {
320 do {
321 seg_a = *(unsigned long *)(a->description + i);
322 seg_b = *(unsigned long *)(b->description + i);
323 if ((seg_a ^ seg_b) != 0)
324 goto differ_plus_i;
325 i += sizeof(unsigned long);
326 } while (i < (a->desc_len & (sizeof(unsigned long) - 1)));
327 }
328
329 for (; i < a->desc_len; i++) {
330 seg_a = *(unsigned char *)(a->description + i);
331 seg_b = *(unsigned char *)(b->description + i);
332 if ((seg_a ^ seg_b) != 0)
333 goto differ_plus_i;
334 }
335
336same:
337 return -1;
338
339differ_plus_i:
340 level += i;
341differ:
342 i = level * 8 + __ffs(seg_a ^ seg_b);
343 return i;
344}
345
346/*
347 * Free an object after stripping the keyring flag off of the pointer.
348 */
349static void keyring_free_object(void *object)
350{
351 key_put(keyring_ptr_to_key(object));
352}
353
354/*
355 * Operations for keyring management by the index-tree routines.
356 */
357static const struct assoc_array_ops keyring_assoc_array_ops = {
358 .get_key_chunk = keyring_get_key_chunk,
359 .get_object_key_chunk = keyring_get_object_key_chunk,
360 .compare_object = keyring_compare_object,
361 .diff_objects = keyring_diff_objects,
362 .free_object = keyring_free_object,
363};
364
365/*
148 * Clean up a keyring when it is destroyed. Unpublish its name if it had one 366 * Clean up a keyring when it is destroyed. Unpublish its name if it had one
149 * and dispose of its data. 367 * and dispose of its data.
150 * 368 *
@@ -155,9 +373,6 @@ static int keyring_match(const struct key *keyring, const void *description)
155 */ 373 */
156static void keyring_destroy(struct key *keyring) 374static void keyring_destroy(struct key *keyring)
157{ 375{
158 struct keyring_list *klist;
159 int loop;
160
161 if (keyring->description) { 376 if (keyring->description) {
162 write_lock(&keyring_name_lock); 377 write_lock(&keyring_name_lock);
163 378
@@ -168,12 +383,7 @@ static void keyring_destroy(struct key *keyring)
168 write_unlock(&keyring_name_lock); 383 write_unlock(&keyring_name_lock);
169 } 384 }
170 385
171 klist = rcu_access_pointer(keyring->payload.subscriptions); 386 assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops);
172 if (klist) {
173 for (loop = klist->nkeys - 1; loop >= 0; loop--)
174 key_put(rcu_access_pointer(klist->keys[loop]));
175 kfree(klist);
176 }
177} 387}
178 388
179/* 389/*
@@ -181,76 +391,88 @@ static void keyring_destroy(struct key *keyring)
181 */ 391 */
182static void keyring_describe(const struct key *keyring, struct seq_file *m) 392static void keyring_describe(const struct key *keyring, struct seq_file *m)
183{ 393{
184 struct keyring_list *klist;
185
186 if (keyring->description) 394 if (keyring->description)
187 seq_puts(m, keyring->description); 395 seq_puts(m, keyring->description);
188 else 396 else
189 seq_puts(m, "[anon]"); 397 seq_puts(m, "[anon]");
190 398
191 if (key_is_instantiated(keyring)) { 399 if (key_is_instantiated(keyring)) {
192 rcu_read_lock(); 400 if (keyring->keys.nr_leaves_on_tree != 0)
193 klist = rcu_dereference(keyring->payload.subscriptions); 401 seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
194 if (klist)
195 seq_printf(m, ": %u/%u", klist->nkeys, klist->maxkeys);
196 else 402 else
197 seq_puts(m, ": empty"); 403 seq_puts(m, ": empty");
198 rcu_read_unlock();
199 } 404 }
200} 405}
201 406
407struct keyring_read_iterator_context {
408 size_t qty;
409 size_t count;
410 key_serial_t __user *buffer;
411};
412
413static int keyring_read_iterator(const void *object, void *data)
414{
415 struct keyring_read_iterator_context *ctx = data;
416 const struct key *key = keyring_ptr_to_key(object);
417 int ret;
418
419 kenter("{%s,%d},,{%zu/%zu}",
420 key->type->name, key->serial, ctx->count, ctx->qty);
421
422 if (ctx->count >= ctx->qty)
423 return 1;
424
425 ret = put_user(key->serial, ctx->buffer);
426 if (ret < 0)
427 return ret;
428 ctx->buffer++;
429 ctx->count += sizeof(key->serial);
430 return 0;
431}
432
202/* 433/*
203 * Read a list of key IDs from the keyring's contents in binary form 434 * Read a list of key IDs from the keyring's contents in binary form
204 * 435 *
205 * The keyring's semaphore is read-locked by the caller. 436 * The keyring's semaphore is read-locked by the caller. This prevents someone
437 * from modifying it under us - which could cause us to read key IDs multiple
438 * times.
206 */ 439 */
207static long keyring_read(const struct key *keyring, 440static long keyring_read(const struct key *keyring,
208 char __user *buffer, size_t buflen) 441 char __user *buffer, size_t buflen)
209{ 442{
210 struct keyring_list *klist; 443 struct keyring_read_iterator_context ctx;
211 struct key *key; 444 unsigned long nr_keys;
212 size_t qty, tmp; 445 int ret;
213 int loop, ret;
214 446
215 ret = 0; 447 kenter("{%d},,%zu", key_serial(keyring), buflen);
216 klist = rcu_dereference_locked_keyring(keyring); 448
217 if (klist) { 449 if (buflen & (sizeof(key_serial_t) - 1))
218 /* calculate how much data we could return */ 450 return -EINVAL;
219 qty = klist->nkeys * sizeof(key_serial_t); 451
220 452 nr_keys = keyring->keys.nr_leaves_on_tree;
221 if (buffer && buflen > 0) { 453 if (nr_keys == 0)
222 if (buflen > qty) 454 return 0;
223 buflen = qty;
224
225 /* copy the IDs of the subscribed keys into the
226 * buffer */
227 ret = -EFAULT;
228
229 for (loop = 0; loop < klist->nkeys; loop++) {
230 key = rcu_deref_link_locked(klist, loop,
231 keyring);
232
233 tmp = sizeof(key_serial_t);
234 if (tmp > buflen)
235 tmp = buflen;
236
237 if (copy_to_user(buffer,
238 &key->serial,
239 tmp) != 0)
240 goto error;
241
242 buflen -= tmp;
243 if (buflen == 0)
244 break;
245 buffer += tmp;
246 }
247 }
248 455
249 ret = qty; 456 /* Calculate how much data we could return */
457 ctx.qty = nr_keys * sizeof(key_serial_t);
458
459 if (!buffer || !buflen)
460 return ctx.qty;
461
462 if (buflen > ctx.qty)
463 ctx.qty = buflen;
464
465 /* Copy the IDs of the subscribed keys into the buffer */
466 ctx.buffer = (key_serial_t __user *)buffer;
467 ctx.count = 0;
468 ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
469 if (ret < 0) {
470 kleave(" = %d [iterate]", ret);
471 return ret;
250 } 472 }
251 473
252error: 474 kleave(" = %zu [ok]", ctx.count);
253 return ret; 475 return ctx.count;
254} 476}
255 477
256/* 478/*
@@ -277,227 +499,361 @@ struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
277} 499}
278EXPORT_SYMBOL(keyring_alloc); 500EXPORT_SYMBOL(keyring_alloc);
279 501
280/** 502/*
281 * keyring_search_aux - Search a keyring tree for a key matching some criteria 503 * Iteration function to consider each key found.
282 * @keyring_ref: A pointer to the keyring with possession indicator.
283 * @cred: The credentials to use for permissions checks.
284 * @type: The type of key to search for.
285 * @description: Parameter for @match.
286 * @match: Function to rule on whether or not a key is the one required.
287 * @no_state_check: Don't check if a matching key is bad
288 *
289 * Search the supplied keyring tree for a key that matches the criteria given.
290 * The root keyring and any linked keyrings must grant Search permission to the
291 * caller to be searchable and keys can only be found if they too grant Search
292 * to the caller. The possession flag on the root keyring pointer controls use
293 * of the possessor bits in permissions checking of the entire tree. In
294 * addition, the LSM gets to forbid keyring searches and key matches.
295 *
296 * The search is performed as a breadth-then-depth search up to the prescribed
297 * limit (KEYRING_SEARCH_MAX_DEPTH).
298 *
299 * Keys are matched to the type provided and are then filtered by the match
300 * function, which is given the description to use in any way it sees fit. The
301 * match function may use any attributes of a key that it wishes to to
302 * determine the match. Normally the match function from the key type would be
303 * used.
304 *
305 * RCU is used to prevent the keyring key lists from disappearing without the
306 * need to take lots of locks.
307 *
308 * Returns a pointer to the found key and increments the key usage count if
309 * successful; -EAGAIN if no matching keys were found, or if expired or revoked
310 * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
311 * specified keyring wasn't a keyring.
312 *
313 * In the case of a successful return, the possession attribute from
314 * @keyring_ref is propagated to the returned key reference.
315 */ 504 */
316key_ref_t keyring_search_aux(key_ref_t keyring_ref, 505static int keyring_search_iterator(const void *object, void *iterator_data)
317 const struct cred *cred,
318 struct key_type *type,
319 const void *description,
320 key_match_func_t match,
321 bool no_state_check)
322{ 506{
323 struct { 507 struct keyring_search_context *ctx = iterator_data;
324 /* Need a separate keylist pointer for RCU purposes */ 508 const struct key *key = keyring_ptr_to_key(object);
325 struct key *keyring; 509 unsigned long kflags = key->flags;
326 struct keyring_list *keylist;
327 int kix;
328 } stack[KEYRING_SEARCH_MAX_DEPTH];
329
330 struct keyring_list *keylist;
331 struct timespec now;
332 unsigned long possessed, kflags;
333 struct key *keyring, *key;
334 key_ref_t key_ref;
335 long err;
336 int sp, nkeys, kix;
337 510
338 keyring = key_ref_to_ptr(keyring_ref); 511 kenter("{%d}", key->serial);
339 possessed = is_key_possessed(keyring_ref);
340 key_check(keyring);
341 512
342 /* top keyring must have search permission to begin the search */ 513 /* ignore keys not of this type */
343 err = key_task_permission(keyring_ref, cred, KEY_SEARCH); 514 if (key->type != ctx->index_key.type) {
344 if (err < 0) { 515 kleave(" = 0 [!type]");
345 key_ref = ERR_PTR(err); 516 return 0;
346 goto error;
347 } 517 }
348 518
349 key_ref = ERR_PTR(-ENOTDIR); 519 /* skip invalidated, revoked and expired keys */
350 if (keyring->type != &key_type_keyring) 520 if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
351 goto error; 521 if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
522 (1 << KEY_FLAG_REVOKED))) {
523 ctx->result = ERR_PTR(-EKEYREVOKED);
524 kleave(" = %d [invrev]", ctx->skipped_ret);
525 goto skipped;
526 }
352 527
353 rcu_read_lock(); 528 if (key->expiry && ctx->now.tv_sec >= key->expiry) {
529 ctx->result = ERR_PTR(-EKEYEXPIRED);
530 kleave(" = %d [expire]", ctx->skipped_ret);
531 goto skipped;
532 }
533 }
354 534
355 now = current_kernel_time(); 535 /* keys that don't match */
356 err = -EAGAIN; 536 if (!ctx->match(key, ctx->match_data)) {
357 sp = 0; 537 kleave(" = 0 [!match]");
358 538 return 0;
359 /* firstly we should check to see if this top-level keyring is what we 539 }
360 * are looking for */
361 key_ref = ERR_PTR(-EAGAIN);
362 kflags = keyring->flags;
363 if (keyring->type == type && match(keyring, description)) {
364 key = keyring;
365 if (no_state_check)
366 goto found;
367 540
368 /* check it isn't negative and hasn't expired or been 541 /* key must have search permissions */
369 * revoked */ 542 if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
370 if (kflags & (1 << KEY_FLAG_REVOKED)) 543 key_task_permission(make_key_ref(key, ctx->possessed),
371 goto error_2; 544 ctx->cred, KEY_SEARCH) < 0) {
372 if (key->expiry && now.tv_sec >= key->expiry) 545 ctx->result = ERR_PTR(-EACCES);
373 goto error_2; 546 kleave(" = %d [!perm]", ctx->skipped_ret);
374 key_ref = ERR_PTR(key->type_data.reject_error); 547 goto skipped;
375 if (kflags & (1 << KEY_FLAG_NEGATIVE))
376 goto error_2;
377 goto found;
378 } 548 }
379 549
380 /* otherwise, the top keyring must not be revoked, expired, or 550 if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
381 * negatively instantiated if we are to search it */ 551 /* we set a different error code if we pass a negative key */
382 key_ref = ERR_PTR(-EAGAIN); 552 if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
383 if (kflags & ((1 << KEY_FLAG_INVALIDATED) | 553 smp_rmb();
384 (1 << KEY_FLAG_REVOKED) | 554 ctx->result = ERR_PTR(key->type_data.reject_error);
385 (1 << KEY_FLAG_NEGATIVE)) || 555 kleave(" = %d [neg]", ctx->skipped_ret);
386 (keyring->expiry && now.tv_sec >= keyring->expiry)) 556 goto skipped;
387 goto error_2; 557 }
388 558 }
389 /* start processing a new keyring */
390descend:
391 kflags = keyring->flags;
392 if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
393 (1 << KEY_FLAG_REVOKED)))
394 goto not_this_keyring;
395 559
396 keylist = rcu_dereference(keyring->payload.subscriptions); 560 /* Found */
397 if (!keylist) 561 ctx->result = make_key_ref(key, ctx->possessed);
398 goto not_this_keyring; 562 kleave(" = 1 [found]");
563 return 1;
399 564
400 /* iterate through the keys in this keyring first */ 565skipped:
401 nkeys = keylist->nkeys; 566 return ctx->skipped_ret;
402 smp_rmb(); 567}
403 for (kix = 0; kix < nkeys; kix++) {
404 key = rcu_dereference(keylist->keys[kix]);
405 kflags = key->flags;
406 568
407 /* ignore keys not of this type */ 569/*
408 if (key->type != type) 570 * Search inside a keyring for a key. We can search by walking to it
409 continue; 571 * directly based on its index-key or we can iterate over the entire
572 * tree looking for it, based on the match function.
573 */
574static int search_keyring(struct key *keyring, struct keyring_search_context *ctx)
575{
576 if ((ctx->flags & KEYRING_SEARCH_LOOKUP_TYPE) ==
577 KEYRING_SEARCH_LOOKUP_DIRECT) {
578 const void *object;
579
580 object = assoc_array_find(&keyring->keys,
581 &keyring_assoc_array_ops,
582 &ctx->index_key);
583 return object ? ctx->iterator(object, ctx) : 0;
584 }
585 return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx);
586}
410 587
411 /* skip invalidated, revoked and expired keys */ 588/*
412 if (!no_state_check) { 589 * Search a tree of keyrings that point to other keyrings up to the maximum
413 if (kflags & ((1 << KEY_FLAG_INVALIDATED) | 590 * depth.
414 (1 << KEY_FLAG_REVOKED))) 591 */
415 continue; 592static bool search_nested_keyrings(struct key *keyring,
593 struct keyring_search_context *ctx)
594{
595 struct {
596 struct key *keyring;
597 struct assoc_array_node *node;
598 int slot;
599 } stack[KEYRING_SEARCH_MAX_DEPTH];
416 600
417 if (key->expiry && now.tv_sec >= key->expiry) 601 struct assoc_array_shortcut *shortcut;
418 continue; 602 struct assoc_array_node *node;
419 } 603 struct assoc_array_ptr *ptr;
604 struct key *key;
605 int sp = 0, slot;
420 606
421 /* keys that don't match */ 607 kenter("{%d},{%s,%s}",
422 if (!match(key, description)) 608 keyring->serial,
423 continue; 609 ctx->index_key.type->name,
610 ctx->index_key.description);
424 611
425 /* key must have search permissions */ 612 if (ctx->index_key.description)
426 if (key_task_permission(make_key_ref(key, possessed), 613 ctx->index_key.desc_len = strlen(ctx->index_key.description);
427 cred, KEY_SEARCH) < 0)
428 continue;
429 614
430 if (no_state_check) 615 /* Check to see if this top-level keyring is what we are looking for
616 * and whether it is valid or not.
617 */
618 if (ctx->flags & KEYRING_SEARCH_LOOKUP_ITERATE ||
619 keyring_compare_object(keyring, &ctx->index_key)) {
620 ctx->skipped_ret = 2;
621 ctx->flags |= KEYRING_SEARCH_DO_STATE_CHECK;
622 switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) {
623 case 1:
431 goto found; 624 goto found;
432 625 case 2:
433 /* we set a different error code if we pass a negative key */ 626 return false;
434 if (kflags & (1 << KEY_FLAG_NEGATIVE)) { 627 default:
435 err = key->type_data.reject_error; 628 break;
436 continue;
437 } 629 }
630 }
631
632 ctx->skipped_ret = 0;
633 if (ctx->flags & KEYRING_SEARCH_NO_STATE_CHECK)
634 ctx->flags &= ~KEYRING_SEARCH_DO_STATE_CHECK;
438 635
636 /* Start processing a new keyring */
637descend_to_keyring:
638 kdebug("descend to %d", keyring->serial);
639 if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
640 (1 << KEY_FLAG_REVOKED)))
641 goto not_this_keyring;
642
643 /* Search through the keys in this keyring before its searching its
644 * subtrees.
645 */
646 if (search_keyring(keyring, ctx))
439 goto found; 647 goto found;
440 }
441 648
442 /* search through the keyrings nested in this one */ 649 /* Then manually iterate through the keyrings nested in this one.
443 kix = 0; 650 *
444ascend: 651 * Start from the root node of the index tree. Because of the way the
445 nkeys = keylist->nkeys; 652 * hash function has been set up, keyrings cluster on the leftmost
446 smp_rmb(); 653 * branch of the root node (root slot 0) or in the root node itself.
447 for (; kix < nkeys; kix++) { 654 * Non-keyrings avoid the leftmost branch of the root entirely (root
448 key = rcu_dereference(keylist->keys[kix]); 655 * slots 1-15).
449 if (key->type != &key_type_keyring) 656 */
450 continue; 657 ptr = ACCESS_ONCE(keyring->keys.root);
658 if (!ptr)
659 goto not_this_keyring;
451 660
452 /* recursively search nested keyrings 661 if (assoc_array_ptr_is_shortcut(ptr)) {
453 * - only search keyrings for which we have search permission 662 /* If the root is a shortcut, either the keyring only contains
663 * keyring pointers (everything clusters behind root slot 0) or
664 * doesn't contain any keyring pointers.
454 */ 665 */
455 if (sp >= KEYRING_SEARCH_MAX_DEPTH) 666 shortcut = assoc_array_ptr_to_shortcut(ptr);
667 smp_read_barrier_depends();
668 if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0)
669 goto not_this_keyring;
670
671 ptr = ACCESS_ONCE(shortcut->next_node);
672 node = assoc_array_ptr_to_node(ptr);
673 goto begin_node;
674 }
675
676 node = assoc_array_ptr_to_node(ptr);
677 smp_read_barrier_depends();
678
679 ptr = node->slots[0];
680 if (!assoc_array_ptr_is_meta(ptr))
681 goto begin_node;
682
683descend_to_node:
684 /* Descend to a more distal node in this keyring's content tree and go
685 * through that.
686 */
687 kdebug("descend");
688 if (assoc_array_ptr_is_shortcut(ptr)) {
689 shortcut = assoc_array_ptr_to_shortcut(ptr);
690 smp_read_barrier_depends();
691 ptr = ACCESS_ONCE(shortcut->next_node);
692 BUG_ON(!assoc_array_ptr_is_node(ptr));
693 }
694 node = assoc_array_ptr_to_node(ptr);
695
696begin_node:
697 kdebug("begin_node");
698 smp_read_barrier_depends();
699 slot = 0;
700ascend_to_node:
701 /* Go through the slots in a node */
702 for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
703 ptr = ACCESS_ONCE(node->slots[slot]);
704
705 if (assoc_array_ptr_is_meta(ptr) && node->back_pointer)
706 goto descend_to_node;
707
708 if (!keyring_ptr_is_keyring(ptr))
456 continue; 709 continue;
457 710
458 if (key_task_permission(make_key_ref(key, possessed), 711 key = keyring_ptr_to_key(ptr);
459 cred, KEY_SEARCH) < 0) 712
713 if (sp >= KEYRING_SEARCH_MAX_DEPTH) {
714 if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) {
715 ctx->result = ERR_PTR(-ELOOP);
716 return false;
717 }
718 goto not_this_keyring;
719 }
720
721 /* Search a nested keyring */
722 if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
723 key_task_permission(make_key_ref(key, ctx->possessed),
724 ctx->cred, KEY_SEARCH) < 0)
460 continue; 725 continue;
461 726
462 /* stack the current position */ 727 /* stack the current position */
463 stack[sp].keyring = keyring; 728 stack[sp].keyring = keyring;
464 stack[sp].keylist = keylist; 729 stack[sp].node = node;
465 stack[sp].kix = kix; 730 stack[sp].slot = slot;
466 sp++; 731 sp++;
467 732
468 /* begin again with the new keyring */ 733 /* begin again with the new keyring */
469 keyring = key; 734 keyring = key;
470 goto descend; 735 goto descend_to_keyring;
471 } 736 }
472 737
473 /* the keyring we're looking at was disqualified or didn't contain a 738 /* We've dealt with all the slots in the current node, so now we need
474 * matching key */ 739 * to ascend to the parent and continue processing there.
740 */
741 ptr = ACCESS_ONCE(node->back_pointer);
742 slot = node->parent_slot;
743
744 if (ptr && assoc_array_ptr_is_shortcut(ptr)) {
745 shortcut = assoc_array_ptr_to_shortcut(ptr);
746 smp_read_barrier_depends();
747 ptr = ACCESS_ONCE(shortcut->back_pointer);
748 slot = shortcut->parent_slot;
749 }
750 if (!ptr)
751 goto not_this_keyring;
752 node = assoc_array_ptr_to_node(ptr);
753 smp_read_barrier_depends();
754 slot++;
755
756 /* If we've ascended to the root (zero backpointer), we must have just
757 * finished processing the leftmost branch rather than the root slots -
758 * so there can't be any more keyrings for us to find.
759 */
760 if (node->back_pointer) {
761 kdebug("ascend %d", slot);
762 goto ascend_to_node;
763 }
764
765 /* The keyring we're looking at was disqualified or didn't contain a
766 * matching key.
767 */
475not_this_keyring: 768not_this_keyring:
476 if (sp > 0) { 769 kdebug("not_this_keyring %d", sp);
477 /* resume the processing of a keyring higher up in the tree */ 770 if (sp <= 0) {
478 sp--; 771 kleave(" = false");
479 keyring = stack[sp].keyring; 772 return false;
480 keylist = stack[sp].keylist;
481 kix = stack[sp].kix + 1;
482 goto ascend;
483 } 773 }
484 774
485 key_ref = ERR_PTR(err); 775 /* Resume the processing of a keyring higher up in the tree */
486 goto error_2; 776 sp--;
777 keyring = stack[sp].keyring;
778 node = stack[sp].node;
779 slot = stack[sp].slot + 1;
780 kdebug("ascend to %d [%d]", keyring->serial, slot);
781 goto ascend_to_node;
487 782
488 /* we found a viable match */ 783 /* We found a viable match */
489found: 784found:
490 atomic_inc(&key->usage); 785 key = key_ref_to_ptr(ctx->result);
491 key->last_used_at = now.tv_sec;
492 keyring->last_used_at = now.tv_sec;
493 while (sp > 0)
494 stack[--sp].keyring->last_used_at = now.tv_sec;
495 key_check(key); 786 key_check(key);
496 key_ref = make_key_ref(key, possessed); 787 if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) {
497error_2: 788 key->last_used_at = ctx->now.tv_sec;
789 keyring->last_used_at = ctx->now.tv_sec;
790 while (sp > 0)
791 stack[--sp].keyring->last_used_at = ctx->now.tv_sec;
792 }
793 kleave(" = true");
794 return true;
795}
796
797/**
798 * keyring_search_aux - Search a keyring tree for a key matching some criteria
799 * @keyring_ref: A pointer to the keyring with possession indicator.
800 * @ctx: The keyring search context.
801 *
802 * Search the supplied keyring tree for a key that matches the criteria given.
803 * The root keyring and any linked keyrings must grant Search permission to the
804 * caller to be searchable and keys can only be found if they too grant Search
805 * to the caller. The possession flag on the root keyring pointer controls use
806 * of the possessor bits in permissions checking of the entire tree. In
807 * addition, the LSM gets to forbid keyring searches and key matches.
808 *
809 * The search is performed as a breadth-then-depth search up to the prescribed
810 * limit (KEYRING_SEARCH_MAX_DEPTH).
811 *
812 * Keys are matched to the type provided and are then filtered by the match
813 * function, which is given the description to use in any way it sees fit. The
814 * match function may use any attributes of a key that it wishes to to
815 * determine the match. Normally the match function from the key type would be
816 * used.
817 *
818 * RCU can be used to prevent the keyring key lists from disappearing without
819 * the need to take lots of locks.
820 *
821 * Returns a pointer to the found key and increments the key usage count if
822 * successful; -EAGAIN if no matching keys were found, or if expired or revoked
823 * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
824 * specified keyring wasn't a keyring.
825 *
826 * In the case of a successful return, the possession attribute from
827 * @keyring_ref is propagated to the returned key reference.
828 */
829key_ref_t keyring_search_aux(key_ref_t keyring_ref,
830 struct keyring_search_context *ctx)
831{
832 struct key *keyring;
833 long err;
834
835 ctx->iterator = keyring_search_iterator;
836 ctx->possessed = is_key_possessed(keyring_ref);
837 ctx->result = ERR_PTR(-EAGAIN);
838
839 keyring = key_ref_to_ptr(keyring_ref);
840 key_check(keyring);
841
842 if (keyring->type != &key_type_keyring)
843 return ERR_PTR(-ENOTDIR);
844
845 if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) {
846 err = key_task_permission(keyring_ref, ctx->cred, KEY_SEARCH);
847 if (err < 0)
848 return ERR_PTR(err);
849 }
850
851 rcu_read_lock();
852 ctx->now = current_kernel_time();
853 if (search_nested_keyrings(keyring, ctx))
854 __key_get(key_ref_to_ptr(ctx->result));
498 rcu_read_unlock(); 855 rcu_read_unlock();
499error: 856 return ctx->result;
500 return key_ref;
501} 857}
502 858
503/** 859/**
@@ -507,77 +863,73 @@ error:
507 * @description: The name of the keyring we want to find. 863 * @description: The name of the keyring we want to find.
508 * 864 *
509 * As keyring_search_aux() above, but using the current task's credentials and 865 * As keyring_search_aux() above, but using the current task's credentials and
510 * type's default matching function. 866 * type's default matching function and preferred search method.
511 */ 867 */
512key_ref_t keyring_search(key_ref_t keyring, 868key_ref_t keyring_search(key_ref_t keyring,
513 struct key_type *type, 869 struct key_type *type,
514 const char *description) 870 const char *description)
515{ 871{
516 if (!type->match) 872 struct keyring_search_context ctx = {
873 .index_key.type = type,
874 .index_key.description = description,
875 .cred = current_cred(),
876 .match = type->match,
877 .match_data = description,
878 .flags = (type->def_lookup_type |
879 KEYRING_SEARCH_DO_STATE_CHECK),
880 };
881
882 if (!ctx.match)
517 return ERR_PTR(-ENOKEY); 883 return ERR_PTR(-ENOKEY);
518 884
519 return keyring_search_aux(keyring, current->cred, 885 return keyring_search_aux(keyring, &ctx);
520 type, description, type->match, false);
521} 886}
522EXPORT_SYMBOL(keyring_search); 887EXPORT_SYMBOL(keyring_search);
523 888
524/* 889/*
525 * Search the given keyring only (no recursion). 890 * Search the given keyring for a key that might be updated.
526 * 891 *
527 * The caller must guarantee that the keyring is a keyring and that the 892 * The caller must guarantee that the keyring is a keyring and that the
528 * permission is granted to search the keyring as no check is made here. 893 * permission is granted to modify the keyring as no check is made here. The
529 * 894 * caller must also hold a lock on the keyring semaphore.
530 * RCU is used to make it unnecessary to lock the keyring key list here.
531 * 895 *
532 * Returns a pointer to the found key with usage count incremented if 896 * Returns a pointer to the found key with usage count incremented if
533 * successful and returns -ENOKEY if not found. Revoked keys and keys not 897 * successful and returns NULL if not found. Revoked and invalidated keys are
534 * providing the requested permission are skipped over. 898 * skipped over.
535 * 899 *
536 * If successful, the possession indicator is propagated from the keyring ref 900 * If successful, the possession indicator is propagated from the keyring ref
537 * to the returned key reference. 901 * to the returned key reference.
538 */ 902 */
539key_ref_t __keyring_search_one(key_ref_t keyring_ref, 903key_ref_t find_key_to_update(key_ref_t keyring_ref,
540 const struct key_type *ktype, 904 const struct keyring_index_key *index_key)
541 const char *description,
542 key_perm_t perm)
543{ 905{
544 struct keyring_list *klist;
545 unsigned long possessed;
546 struct key *keyring, *key; 906 struct key *keyring, *key;
547 int nkeys, loop; 907 const void *object;
548 908
549 keyring = key_ref_to_ptr(keyring_ref); 909 keyring = key_ref_to_ptr(keyring_ref);
550 possessed = is_key_possessed(keyring_ref);
551 910
552 rcu_read_lock(); 911 kenter("{%d},{%s,%s}",
912 keyring->serial, index_key->type->name, index_key->description);
553 913
554 klist = rcu_dereference(keyring->payload.subscriptions); 914 object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops,
555 if (klist) { 915 index_key);
556 nkeys = klist->nkeys;
557 smp_rmb();
558 for (loop = 0; loop < nkeys ; loop++) {
559 key = rcu_dereference(klist->keys[loop]);
560 if (key->type == ktype &&
561 (!key->type->match ||
562 key->type->match(key, description)) &&
563 key_permission(make_key_ref(key, possessed),
564 perm) == 0 &&
565 !(key->flags & ((1 << KEY_FLAG_INVALIDATED) |
566 (1 << KEY_FLAG_REVOKED)))
567 )
568 goto found;
569 }
570 }
571 916
572 rcu_read_unlock(); 917 if (object)
573 return ERR_PTR(-ENOKEY); 918 goto found;
919
920 kleave(" = NULL");
921 return NULL;
574 922
575found: 923found:
576 atomic_inc(&key->usage); 924 key = keyring_ptr_to_key(object);
577 keyring->last_used_at = key->last_used_at = 925 if (key->flags & ((1 << KEY_FLAG_INVALIDATED) |
578 current_kernel_time().tv_sec; 926 (1 << KEY_FLAG_REVOKED))) {
579 rcu_read_unlock(); 927 kleave(" = NULL [x]");
580 return make_key_ref(key, possessed); 928 return NULL;
929 }
930 __key_get(key);
931 kleave(" = {%d}", key->serial);
932 return make_key_ref(key, is_key_possessed(keyring_ref));
581} 933}
582 934
583/* 935/*
@@ -640,6 +992,19 @@ out:
640 return keyring; 992 return keyring;
641} 993}
642 994
995static int keyring_detect_cycle_iterator(const void *object,
996 void *iterator_data)
997{
998 struct keyring_search_context *ctx = iterator_data;
999 const struct key *key = keyring_ptr_to_key(object);
1000
1001 kenter("{%d}", key->serial);
1002
1003 BUG_ON(key != ctx->match_data);
1004 ctx->result = ERR_PTR(-EDEADLK);
1005 return 1;
1006}
1007
643/* 1008/*
644 * See if a cycle will will be created by inserting acyclic tree B in acyclic 1009 * See if a cycle will will be created by inserting acyclic tree B in acyclic
645 * tree A at the topmost level (ie: as a direct child of A). 1010 * tree A at the topmost level (ie: as a direct child of A).
@@ -649,116 +1014,39 @@ out:
649 */ 1014 */
650static int keyring_detect_cycle(struct key *A, struct key *B) 1015static int keyring_detect_cycle(struct key *A, struct key *B)
651{ 1016{
652 struct { 1017 struct keyring_search_context ctx = {
653 struct keyring_list *keylist; 1018 .index_key = A->index_key,
654 int kix; 1019 .match_data = A,
655 } stack[KEYRING_SEARCH_MAX_DEPTH]; 1020 .iterator = keyring_detect_cycle_iterator,
656 1021 .flags = (KEYRING_SEARCH_LOOKUP_DIRECT |
657 struct keyring_list *keylist; 1022 KEYRING_SEARCH_NO_STATE_CHECK |
658 struct key *subtree, *key; 1023 KEYRING_SEARCH_NO_UPDATE_TIME |
659 int sp, nkeys, kix, ret; 1024 KEYRING_SEARCH_NO_CHECK_PERM |
1025 KEYRING_SEARCH_DETECT_TOO_DEEP),
1026 };
660 1027
661 rcu_read_lock(); 1028 rcu_read_lock();
662 1029 search_nested_keyrings(B, &ctx);
663 ret = -EDEADLK;
664 if (A == B)
665 goto cycle_detected;
666
667 subtree = B;
668 sp = 0;
669
670 /* start processing a new keyring */
671descend:
672 if (test_bit(KEY_FLAG_REVOKED, &subtree->flags))
673 goto not_this_keyring;
674
675 keylist = rcu_dereference(subtree->payload.subscriptions);
676 if (!keylist)
677 goto not_this_keyring;
678 kix = 0;
679
680ascend:
681 /* iterate through the remaining keys in this keyring */
682 nkeys = keylist->nkeys;
683 smp_rmb();
684 for (; kix < nkeys; kix++) {
685 key = rcu_dereference(keylist->keys[kix]);
686
687 if (key == A)
688 goto cycle_detected;
689
690 /* recursively check nested keyrings */
691 if (key->type == &key_type_keyring) {
692 if (sp >= KEYRING_SEARCH_MAX_DEPTH)
693 goto too_deep;
694
695 /* stack the current position */
696 stack[sp].keylist = keylist;
697 stack[sp].kix = kix;
698 sp++;
699
700 /* begin again with the new keyring */
701 subtree = key;
702 goto descend;
703 }
704 }
705
706 /* the keyring we're looking at was disqualified or didn't contain a
707 * matching key */
708not_this_keyring:
709 if (sp > 0) {
710 /* resume the checking of a keyring higher up in the tree */
711 sp--;
712 keylist = stack[sp].keylist;
713 kix = stack[sp].kix + 1;
714 goto ascend;
715 }
716
717 ret = 0; /* no cycles detected */
718
719error:
720 rcu_read_unlock(); 1030 rcu_read_unlock();
721 return ret; 1031 return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result);
722
723too_deep:
724 ret = -ELOOP;
725 goto error;
726
727cycle_detected:
728 ret = -EDEADLK;
729 goto error;
730}
731
732/*
733 * Dispose of a keyring list after the RCU grace period, freeing the unlinked
734 * key
735 */
736static void keyring_unlink_rcu_disposal(struct rcu_head *rcu)
737{
738 struct keyring_list *klist =
739 container_of(rcu, struct keyring_list, rcu);
740
741 if (klist->delkey != USHRT_MAX)
742 key_put(rcu_access_pointer(klist->keys[klist->delkey]));
743 kfree(klist);
744} 1032}
745 1033
746/* 1034/*
747 * Preallocate memory so that a key can be linked into to a keyring. 1035 * Preallocate memory so that a key can be linked into to a keyring.
748 */ 1036 */
749int __key_link_begin(struct key *keyring, const struct key_type *type, 1037int __key_link_begin(struct key *keyring,
750 const char *description, unsigned long *_prealloc) 1038 const struct keyring_index_key *index_key,
1039 struct assoc_array_edit **_edit)
751 __acquires(&keyring->sem) 1040 __acquires(&keyring->sem)
752 __acquires(&keyring_serialise_link_sem) 1041 __acquires(&keyring_serialise_link_sem)
753{ 1042{
754 struct keyring_list *klist, *nklist; 1043 struct assoc_array_edit *edit;
755 unsigned long prealloc; 1044 int ret;
756 unsigned max; 1045
757 time_t lowest_lru; 1046 kenter("%d,%s,%s,",
758 size_t size; 1047 keyring->serial, index_key->type->name, index_key->description);
759 int loop, lru, ret;
760 1048
761 kenter("%d,%s,%s,", key_serial(keyring), type->name, description); 1049 BUG_ON(index_key->desc_len == 0);
762 1050
763 if (keyring->type != &key_type_keyring) 1051 if (keyring->type != &key_type_keyring)
764 return -ENOTDIR; 1052 return -ENOTDIR;
@@ -771,100 +1059,39 @@ int __key_link_begin(struct key *keyring, const struct key_type *type,
771 1059
772 /* serialise link/link calls to prevent parallel calls causing a cycle 1060 /* serialise link/link calls to prevent parallel calls causing a cycle
773 * when linking two keyring in opposite orders */ 1061 * when linking two keyring in opposite orders */
774 if (type == &key_type_keyring) 1062 if (index_key->type == &key_type_keyring)
775 down_write(&keyring_serialise_link_sem); 1063 down_write(&keyring_serialise_link_sem);
776 1064
777 klist = rcu_dereference_locked_keyring(keyring); 1065 /* Create an edit script that will insert/replace the key in the
778 1066 * keyring tree.
779 /* see if there's a matching key we can displace */ 1067 */
780 lru = -1; 1068 edit = assoc_array_insert(&keyring->keys,
781 if (klist && klist->nkeys > 0) { 1069 &keyring_assoc_array_ops,
782 lowest_lru = TIME_T_MAX; 1070 index_key,
783 for (loop = klist->nkeys - 1; loop >= 0; loop--) { 1071 NULL);
784 struct key *key = rcu_deref_link_locked(klist, loop, 1072 if (IS_ERR(edit)) {
785 keyring); 1073 ret = PTR_ERR(edit);
786 if (key->type == type &&
787 strcmp(key->description, description) == 0) {
788 /* Found a match - we'll replace the link with
789 * one to the new key. We record the slot
790 * position.
791 */
792 klist->delkey = loop;
793 prealloc = 0;
794 goto done;
795 }
796 if (key->last_used_at < lowest_lru) {
797 lowest_lru = key->last_used_at;
798 lru = loop;
799 }
800 }
801 }
802
803 /* If the keyring is full then do an LRU discard */
804 if (klist &&
805 klist->nkeys == klist->maxkeys &&
806 klist->maxkeys >= MAX_KEYRING_LINKS) {
807 kdebug("LRU discard %d\n", lru);
808 klist->delkey = lru;
809 prealloc = 0;
810 goto done;
811 }
812
813 /* check that we aren't going to overrun the user's quota */
814 ret = key_payload_reserve(keyring,
815 keyring->datalen + KEYQUOTA_LINK_BYTES);
816 if (ret < 0)
817 goto error_sem; 1074 goto error_sem;
1075 }
818 1076
819 if (klist && klist->nkeys < klist->maxkeys) { 1077 /* If we're not replacing a link in-place then we're going to need some
820 /* there's sufficient slack space to append directly */ 1078 * extra quota.
821 klist->delkey = klist->nkeys; 1079 */
822 prealloc = KEY_LINK_FIXQUOTA; 1080 if (!edit->dead_leaf) {
823 } else { 1081 ret = key_payload_reserve(keyring,
824 /* grow the key list */ 1082 keyring->datalen + KEYQUOTA_LINK_BYTES);
825 max = 4; 1083 if (ret < 0)
826 if (klist) { 1084 goto error_cancel;
827 max += klist->maxkeys;
828 if (max > MAX_KEYRING_LINKS)
829 max = MAX_KEYRING_LINKS;
830 BUG_ON(max <= klist->maxkeys);
831 }
832
833 size = sizeof(*klist) + sizeof(struct key *) * max;
834
835 ret = -ENOMEM;
836 nklist = kmalloc(size, GFP_KERNEL);
837 if (!nklist)
838 goto error_quota;
839
840 nklist->maxkeys = max;
841 if (klist) {
842 memcpy(nklist->keys, klist->keys,
843 sizeof(struct key *) * klist->nkeys);
844 nklist->delkey = klist->nkeys;
845 nklist->nkeys = klist->nkeys + 1;
846 klist->delkey = USHRT_MAX;
847 } else {
848 nklist->nkeys = 1;
849 nklist->delkey = 0;
850 }
851
852 /* add the key into the new space */
853 RCU_INIT_POINTER(nklist->keys[nklist->delkey], NULL);
854 prealloc = (unsigned long)nklist | KEY_LINK_FIXQUOTA;
855 } 1085 }
856 1086
857done: 1087 *_edit = edit;
858 *_prealloc = prealloc;
859 kleave(" = 0"); 1088 kleave(" = 0");
860 return 0; 1089 return 0;
861 1090
862error_quota: 1091error_cancel:
863 /* undo the quota changes */ 1092 assoc_array_cancel_edit(edit);
864 key_payload_reserve(keyring,
865 keyring->datalen - KEYQUOTA_LINK_BYTES);
866error_sem: 1093error_sem:
867 if (type == &key_type_keyring) 1094 if (index_key->type == &key_type_keyring)
868 up_write(&keyring_serialise_link_sem); 1095 up_write(&keyring_serialise_link_sem);
869error_krsem: 1096error_krsem:
870 up_write(&keyring->sem); 1097 up_write(&keyring->sem);
@@ -895,60 +1122,12 @@ int __key_link_check_live_key(struct key *keyring, struct key *key)
895 * holds at most one link to any given key of a particular type+description 1122 * holds at most one link to any given key of a particular type+description
896 * combination. 1123 * combination.
897 */ 1124 */
898void __key_link(struct key *keyring, struct key *key, 1125void __key_link(struct key *key, struct assoc_array_edit **_edit)
899 unsigned long *_prealloc)
900{ 1126{
901 struct keyring_list *klist, *nklist; 1127 __key_get(key);
902 struct key *discard; 1128 assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key));
903 1129 assoc_array_apply_edit(*_edit);
904 nklist = (struct keyring_list *)(*_prealloc & ~KEY_LINK_FIXQUOTA); 1130 *_edit = NULL;
905 *_prealloc = 0;
906
907 kenter("%d,%d,%p", keyring->serial, key->serial, nklist);
908
909 klist = rcu_dereference_locked_keyring(keyring);
910
911 atomic_inc(&key->usage);
912 keyring->last_used_at = key->last_used_at =
913 current_kernel_time().tv_sec;
914
915 /* there's a matching key we can displace or an empty slot in a newly
916 * allocated list we can fill */
917 if (nklist) {
918 kdebug("reissue %hu/%hu/%hu",
919 nklist->delkey, nklist->nkeys, nklist->maxkeys);
920
921 RCU_INIT_POINTER(nklist->keys[nklist->delkey], key);
922
923 rcu_assign_pointer(keyring->payload.subscriptions, nklist);
924
925 /* dispose of the old keyring list and, if there was one, the
926 * displaced key */
927 if (klist) {
928 kdebug("dispose %hu/%hu/%hu",
929 klist->delkey, klist->nkeys, klist->maxkeys);
930 call_rcu(&klist->rcu, keyring_unlink_rcu_disposal);
931 }
932 } else if (klist->delkey < klist->nkeys) {
933 kdebug("replace %hu/%hu/%hu",
934 klist->delkey, klist->nkeys, klist->maxkeys);
935
936 discard = rcu_dereference_protected(
937 klist->keys[klist->delkey],
938 rwsem_is_locked(&keyring->sem));
939 rcu_assign_pointer(klist->keys[klist->delkey], key);
940 /* The garbage collector will take care of RCU
941 * synchronisation */
942 key_put(discard);
943 } else {
944 /* there's sufficient slack space to append directly */
945 kdebug("append %hu/%hu/%hu",
946 klist->delkey, klist->nkeys, klist->maxkeys);
947
948 RCU_INIT_POINTER(klist->keys[klist->delkey], key);
949 smp_wmb();
950 klist->nkeys++;
951 }
952} 1131}
953 1132
954/* 1133/*
@@ -956,24 +1135,22 @@ void __key_link(struct key *keyring, struct key *key,
956 * 1135 *
957 * Must be called with __key_link_begin() having being called. 1136 * Must be called with __key_link_begin() having being called.
958 */ 1137 */
959void __key_link_end(struct key *keyring, struct key_type *type, 1138void __key_link_end(struct key *keyring,
960 unsigned long prealloc) 1139 const struct keyring_index_key *index_key,
1140 struct assoc_array_edit *edit)
961 __releases(&keyring->sem) 1141 __releases(&keyring->sem)
962 __releases(&keyring_serialise_link_sem) 1142 __releases(&keyring_serialise_link_sem)
963{ 1143{
964 BUG_ON(type == NULL); 1144 BUG_ON(index_key->type == NULL);
965 BUG_ON(type->name == NULL); 1145 kenter("%d,%s,", keyring->serial, index_key->type->name);
966 kenter("%d,%s,%lx", keyring->serial, type->name, prealloc);
967 1146
968 if (type == &key_type_keyring) 1147 if (index_key->type == &key_type_keyring)
969 up_write(&keyring_serialise_link_sem); 1148 up_write(&keyring_serialise_link_sem);
970 1149
971 if (prealloc) { 1150 if (edit && !edit->dead_leaf) {
972 if (prealloc & KEY_LINK_FIXQUOTA) 1151 key_payload_reserve(keyring,
973 key_payload_reserve(keyring, 1152 keyring->datalen - KEYQUOTA_LINK_BYTES);
974 keyring->datalen - 1153 assoc_array_cancel_edit(edit);
975 KEYQUOTA_LINK_BYTES);
976 kfree((struct keyring_list *)(prealloc & ~KEY_LINK_FIXQUOTA));
977 } 1154 }
978 up_write(&keyring->sem); 1155 up_write(&keyring->sem);
979} 1156}
@@ -1000,20 +1177,28 @@ void __key_link_end(struct key *keyring, struct key_type *type,
1000 */ 1177 */
1001int key_link(struct key *keyring, struct key *key) 1178int key_link(struct key *keyring, struct key *key)
1002{ 1179{
1003 unsigned long prealloc; 1180 struct assoc_array_edit *edit;
1004 int ret; 1181 int ret;
1005 1182
1183 kenter("{%d,%d}", keyring->serial, atomic_read(&keyring->usage));
1184
1006 key_check(keyring); 1185 key_check(keyring);
1007 key_check(key); 1186 key_check(key);
1008 1187
1009 ret = __key_link_begin(keyring, key->type, key->description, &prealloc); 1188 if (test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags) &&
1189 !test_bit(KEY_FLAG_TRUSTED, &key->flags))
1190 return -EPERM;
1191
1192 ret = __key_link_begin(keyring, &key->index_key, &edit);
1010 if (ret == 0) { 1193 if (ret == 0) {
1194 kdebug("begun {%d,%d}", keyring->serial, atomic_read(&keyring->usage));
1011 ret = __key_link_check_live_key(keyring, key); 1195 ret = __key_link_check_live_key(keyring, key);
1012 if (ret == 0) 1196 if (ret == 0)
1013 __key_link(keyring, key, &prealloc); 1197 __key_link(key, &edit);
1014 __key_link_end(keyring, key->type, prealloc); 1198 __key_link_end(keyring, &key->index_key, edit);
1015 } 1199 }
1016 1200
1201 kleave(" = %d {%d,%d}", ret, keyring->serial, atomic_read(&keyring->usage));
1017 return ret; 1202 return ret;
1018} 1203}
1019EXPORT_SYMBOL(key_link); 1204EXPORT_SYMBOL(key_link);
@@ -1037,90 +1222,37 @@ EXPORT_SYMBOL(key_link);
1037 */ 1222 */
1038int key_unlink(struct key *keyring, struct key *key) 1223int key_unlink(struct key *keyring, struct key *key)
1039{ 1224{
1040 struct keyring_list *klist, *nklist; 1225 struct assoc_array_edit *edit;
1041 int loop, ret; 1226 int ret;
1042 1227
1043 key_check(keyring); 1228 key_check(keyring);
1044 key_check(key); 1229 key_check(key);
1045 1230
1046 ret = -ENOTDIR;
1047 if (keyring->type != &key_type_keyring) 1231 if (keyring->type != &key_type_keyring)
1048 goto error; 1232 return -ENOTDIR;
1049 1233
1050 down_write(&keyring->sem); 1234 down_write(&keyring->sem);
1051 1235
1052 klist = rcu_dereference_locked_keyring(keyring); 1236 edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops,
1053 if (klist) { 1237 &key->index_key);
1054 /* search the keyring for the key */ 1238 if (IS_ERR(edit)) {
1055 for (loop = 0; loop < klist->nkeys; loop++) 1239 ret = PTR_ERR(edit);
1056 if (rcu_access_pointer(klist->keys[loop]) == key) 1240 goto error;
1057 goto key_is_present;
1058 } 1241 }
1059
1060 up_write(&keyring->sem);
1061 ret = -ENOENT; 1242 ret = -ENOENT;
1062 goto error; 1243 if (edit == NULL)
1063 1244 goto error;
1064key_is_present:
1065 /* we need to copy the key list for RCU purposes */
1066 nklist = kmalloc(sizeof(*klist) +
1067 sizeof(struct key *) * klist->maxkeys,
1068 GFP_KERNEL);
1069 if (!nklist)
1070 goto nomem;
1071 nklist->maxkeys = klist->maxkeys;
1072 nklist->nkeys = klist->nkeys - 1;
1073
1074 if (loop > 0)
1075 memcpy(&nklist->keys[0],
1076 &klist->keys[0],
1077 loop * sizeof(struct key *));
1078
1079 if (loop < nklist->nkeys)
1080 memcpy(&nklist->keys[loop],
1081 &klist->keys[loop + 1],
1082 (nklist->nkeys - loop) * sizeof(struct key *));
1083
1084 /* adjust the user's quota */
1085 key_payload_reserve(keyring,
1086 keyring->datalen - KEYQUOTA_LINK_BYTES);
1087
1088 rcu_assign_pointer(keyring->payload.subscriptions, nklist);
1089
1090 up_write(&keyring->sem);
1091
1092 /* schedule for later cleanup */
1093 klist->delkey = loop;
1094 call_rcu(&klist->rcu, keyring_unlink_rcu_disposal);
1095 1245
1246 assoc_array_apply_edit(edit);
1247 key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES);
1096 ret = 0; 1248 ret = 0;
1097 1249
1098error: 1250error:
1099 return ret;
1100nomem:
1101 ret = -ENOMEM;
1102 up_write(&keyring->sem); 1251 up_write(&keyring->sem);
1103 goto error; 1252 return ret;
1104} 1253}
1105EXPORT_SYMBOL(key_unlink); 1254EXPORT_SYMBOL(key_unlink);
1106 1255
1107/*
1108 * Dispose of a keyring list after the RCU grace period, releasing the keys it
1109 * links to.
1110 */
1111static void keyring_clear_rcu_disposal(struct rcu_head *rcu)
1112{
1113 struct keyring_list *klist;
1114 int loop;
1115
1116 klist = container_of(rcu, struct keyring_list, rcu);
1117
1118 for (loop = klist->nkeys - 1; loop >= 0; loop--)
1119 key_put(rcu_access_pointer(klist->keys[loop]));
1120
1121 kfree(klist);
1122}
1123
1124/** 1256/**
1125 * keyring_clear - Clear a keyring 1257 * keyring_clear - Clear a keyring
1126 * @keyring: The keyring to clear. 1258 * @keyring: The keyring to clear.
@@ -1131,33 +1263,25 @@ static void keyring_clear_rcu_disposal(struct rcu_head *rcu)
1131 */ 1263 */
1132int keyring_clear(struct key *keyring) 1264int keyring_clear(struct key *keyring)
1133{ 1265{
1134 struct keyring_list *klist; 1266 struct assoc_array_edit *edit;
1135 int ret; 1267 int ret;
1136 1268
1137 ret = -ENOTDIR; 1269 if (keyring->type != &key_type_keyring)
1138 if (keyring->type == &key_type_keyring) { 1270 return -ENOTDIR;
1139 /* detach the pointer block with the locks held */
1140 down_write(&keyring->sem);
1141
1142 klist = rcu_dereference_locked_keyring(keyring);
1143 if (klist) {
1144 /* adjust the quota */
1145 key_payload_reserve(keyring,
1146 sizeof(struct keyring_list));
1147
1148 rcu_assign_pointer(keyring->payload.subscriptions,
1149 NULL);
1150 }
1151
1152 up_write(&keyring->sem);
1153 1271
1154 /* free the keys after the locks have been dropped */ 1272 down_write(&keyring->sem);
1155 if (klist)
1156 call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
1157 1273
1274 edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
1275 if (IS_ERR(edit)) {
1276 ret = PTR_ERR(edit);
1277 } else {
1278 if (edit)
1279 assoc_array_apply_edit(edit);
1280 key_payload_reserve(keyring, 0);
1158 ret = 0; 1281 ret = 0;
1159 } 1282 }
1160 1283
1284 up_write(&keyring->sem);
1161 return ret; 1285 return ret;
1162} 1286}
1163EXPORT_SYMBOL(keyring_clear); 1287EXPORT_SYMBOL(keyring_clear);
@@ -1169,111 +1293,68 @@ EXPORT_SYMBOL(keyring_clear);
1169 */ 1293 */
1170static void keyring_revoke(struct key *keyring) 1294static void keyring_revoke(struct key *keyring)
1171{ 1295{
1172 struct keyring_list *klist; 1296 struct assoc_array_edit *edit;
1297
1298 edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
1299 if (!IS_ERR(edit)) {
1300 if (edit)
1301 assoc_array_apply_edit(edit);
1302 key_payload_reserve(keyring, 0);
1303 }
1304}
1305
1306static bool keyring_gc_select_iterator(void *object, void *iterator_data)
1307{
1308 struct key *key = keyring_ptr_to_key(object);
1309 time_t *limit = iterator_data;
1173 1310
1174 klist = rcu_dereference_locked_keyring(keyring); 1311 if (key_is_dead(key, *limit))
1312 return false;
1313 key_get(key);
1314 return true;
1315}
1175 1316
1176 /* adjust the quota */ 1317static int keyring_gc_check_iterator(const void *object, void *iterator_data)
1177 key_payload_reserve(keyring, 0); 1318{
1319 const struct key *key = keyring_ptr_to_key(object);
1320 time_t *limit = iterator_data;
1178 1321
1179 if (klist) { 1322 key_check(key);
1180 rcu_assign_pointer(keyring->payload.subscriptions, NULL); 1323 return key_is_dead(key, *limit);
1181 call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
1182 }
1183} 1324}
1184 1325
1185/* 1326/*
1186 * Collect garbage from the contents of a keyring, replacing the old list with 1327 * Garbage collect pointers from a keyring.
1187 * a new one with the pointers all shuffled down.
1188 * 1328 *
1189 * Dead keys are classed as oned that are flagged as being dead or are revoked, 1329 * Not called with any locks held. The keyring's key struct will not be
1190 * expired or negative keys that were revoked or expired before the specified 1330 * deallocated under us as only our caller may deallocate it.
1191 * limit.
1192 */ 1331 */
1193void keyring_gc(struct key *keyring, time_t limit) 1332void keyring_gc(struct key *keyring, time_t limit)
1194{ 1333{
1195 struct keyring_list *klist, *new; 1334 int result;
1196 struct key *key;
1197 int loop, keep, max;
1198
1199 kenter("{%x,%s}", key_serial(keyring), keyring->description);
1200
1201 down_write(&keyring->sem);
1202
1203 klist = rcu_dereference_locked_keyring(keyring);
1204 if (!klist)
1205 goto no_klist;
1206
1207 /* work out how many subscriptions we're keeping */
1208 keep = 0;
1209 for (loop = klist->nkeys - 1; loop >= 0; loop--)
1210 if (!key_is_dead(rcu_deref_link_locked(klist, loop, keyring),
1211 limit))
1212 keep++;
1213
1214 if (keep == klist->nkeys)
1215 goto just_return;
1216
1217 /* allocate a new keyring payload */
1218 max = roundup(keep, 4);
1219 new = kmalloc(sizeof(struct keyring_list) + max * sizeof(struct key *),
1220 GFP_KERNEL);
1221 if (!new)
1222 goto nomem;
1223 new->maxkeys = max;
1224 new->nkeys = 0;
1225 new->delkey = 0;
1226
1227 /* install the live keys
1228 * - must take care as expired keys may be updated back to life
1229 */
1230 keep = 0;
1231 for (loop = klist->nkeys - 1; loop >= 0; loop--) {
1232 key = rcu_deref_link_locked(klist, loop, keyring);
1233 if (!key_is_dead(key, limit)) {
1234 if (keep >= max)
1235 goto discard_new;
1236 RCU_INIT_POINTER(new->keys[keep++], key_get(key));
1237 }
1238 }
1239 new->nkeys = keep;
1240
1241 /* adjust the quota */
1242 key_payload_reserve(keyring,
1243 sizeof(struct keyring_list) +
1244 KEYQUOTA_LINK_BYTES * keep);
1245 1335
1246 if (keep == 0) { 1336 kenter("%x{%s}", keyring->serial, keyring->description ?: "");
1247 rcu_assign_pointer(keyring->payload.subscriptions, NULL);
1248 kfree(new);
1249 } else {
1250 rcu_assign_pointer(keyring->payload.subscriptions, new);
1251 }
1252 1337
1253 up_write(&keyring->sem); 1338 if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
1339 (1 << KEY_FLAG_REVOKED)))
1340 goto dont_gc;
1254 1341
1255 call_rcu(&klist->rcu, keyring_clear_rcu_disposal); 1342 /* scan the keyring looking for dead keys */
1256 kleave(" [yes]"); 1343 rcu_read_lock();
1257 return; 1344 result = assoc_array_iterate(&keyring->keys,
1258 1345 keyring_gc_check_iterator, &limit);
1259discard_new: 1346 rcu_read_unlock();
1260 new->nkeys = keep; 1347 if (result == true)
1261 keyring_clear_rcu_disposal(&new->rcu); 1348 goto do_gc;
1262 up_write(&keyring->sem);
1263 kleave(" [discard]");
1264 return;
1265
1266just_return:
1267 up_write(&keyring->sem);
1268 kleave(" [no dead]");
1269 return;
1270 1349
1271no_klist: 1350dont_gc:
1272 up_write(&keyring->sem); 1351 kleave(" [no gc]");
1273 kleave(" [no_klist]");
1274 return; 1352 return;
1275 1353
1276nomem: 1354do_gc:
1355 down_write(&keyring->sem);
1356 assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops,
1357 keyring_gc_select_iterator, &limit);
1277 up_write(&keyring->sem); 1358 up_write(&keyring->sem);
1278 kleave(" [oom]"); 1359 kleave(" [gc]");
1279} 1360}
diff --git a/security/keys/persistent.c b/security/keys/persistent.c
new file mode 100644
index 000000000000..0ad3ee283781
--- /dev/null
+++ b/security/keys/persistent.c
@@ -0,0 +1,167 @@
1/* General persistent per-UID keyrings register
2 *
3 * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/user_namespace.h>
13#include "internal.h"
14
15unsigned persistent_keyring_expiry = 3 * 24 * 3600; /* Expire after 3 days of non-use */
16
17/*
18 * Create the persistent keyring register for the current user namespace.
19 *
20 * Called with the namespace's sem locked for writing.
21 */
22static int key_create_persistent_register(struct user_namespace *ns)
23{
24 struct key *reg = keyring_alloc(".persistent_register",
25 KUIDT_INIT(0), KGIDT_INIT(0),
26 current_cred(),
27 ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
28 KEY_USR_VIEW | KEY_USR_READ),
29 KEY_ALLOC_NOT_IN_QUOTA, NULL);
30 if (IS_ERR(reg))
31 return PTR_ERR(reg);
32
33 ns->persistent_keyring_register = reg;
34 return 0;
35}
36
37/*
38 * Create the persistent keyring for the specified user.
39 *
40 * Called with the namespace's sem locked for writing.
41 */
42static key_ref_t key_create_persistent(struct user_namespace *ns, kuid_t uid,
43 struct keyring_index_key *index_key)
44{
45 struct key *persistent;
46 key_ref_t reg_ref, persistent_ref;
47
48 if (!ns->persistent_keyring_register) {
49 long err = key_create_persistent_register(ns);
50 if (err < 0)
51 return ERR_PTR(err);
52 } else {
53 reg_ref = make_key_ref(ns->persistent_keyring_register, true);
54 persistent_ref = find_key_to_update(reg_ref, index_key);
55 if (persistent_ref)
56 return persistent_ref;
57 }
58
59 persistent = keyring_alloc(index_key->description,
60 uid, INVALID_GID, current_cred(),
61 ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
62 KEY_USR_VIEW | KEY_USR_READ),
63 KEY_ALLOC_NOT_IN_QUOTA,
64 ns->persistent_keyring_register);
65 if (IS_ERR(persistent))
66 return ERR_CAST(persistent);
67
68 return make_key_ref(persistent, true);
69}
70
71/*
72 * Get the persistent keyring for a specific UID and link it to the nominated
73 * keyring.
74 */
75static long key_get_persistent(struct user_namespace *ns, kuid_t uid,
76 key_ref_t dest_ref)
77{
78 struct keyring_index_key index_key;
79 struct key *persistent;
80 key_ref_t reg_ref, persistent_ref;
81 char buf[32];
82 long ret;
83
84 /* Look in the register if it exists */
85 index_key.type = &key_type_keyring;
86 index_key.description = buf;
87 index_key.desc_len = sprintf(buf, "_persistent.%u", from_kuid(ns, uid));
88
89 if (ns->persistent_keyring_register) {
90 reg_ref = make_key_ref(ns->persistent_keyring_register, true);
91 down_read(&ns->persistent_keyring_register_sem);
92 persistent_ref = find_key_to_update(reg_ref, &index_key);
93 up_read(&ns->persistent_keyring_register_sem);
94
95 if (persistent_ref)
96 goto found;
97 }
98
99 /* It wasn't in the register, so we'll need to create it. We might
100 * also need to create the register.
101 */
102 down_write(&ns->persistent_keyring_register_sem);
103 persistent_ref = key_create_persistent(ns, uid, &index_key);
104 up_write(&ns->persistent_keyring_register_sem);
105 if (!IS_ERR(persistent_ref))
106 goto found;
107
108 return PTR_ERR(persistent_ref);
109
110found:
111 ret = key_task_permission(persistent_ref, current_cred(), KEY_LINK);
112 if (ret == 0) {
113 persistent = key_ref_to_ptr(persistent_ref);
114 ret = key_link(key_ref_to_ptr(dest_ref), persistent);
115 if (ret == 0) {
116 key_set_timeout(persistent, persistent_keyring_expiry);
117 ret = persistent->serial;
118 }
119 }
120
121 key_ref_put(persistent_ref);
122 return ret;
123}
124
125/*
126 * Get the persistent keyring for a specific UID and link it to the nominated
127 * keyring.
128 */
129long keyctl_get_persistent(uid_t _uid, key_serial_t destid)
130{
131 struct user_namespace *ns = current_user_ns();
132 key_ref_t dest_ref;
133 kuid_t uid;
134 long ret;
135
136 /* -1 indicates the current user */
137 if (_uid == (uid_t)-1) {
138 uid = current_uid();
139 } else {
140 uid = make_kuid(ns, _uid);
141 if (!uid_valid(uid))
142 return -EINVAL;
143
144 /* You can only see your own persistent cache if you're not
145 * sufficiently privileged.
146 */
147 if (!uid_eq(uid, current_uid()) &&
148 !uid_eq(uid, current_euid()) &&
149 !ns_capable(ns, CAP_SETUID))
150 return -EPERM;
151 }
152
153 /* There must be a destination keyring */
154 dest_ref = lookup_user_key(destid, KEY_LOOKUP_CREATE, KEY_WRITE);
155 if (IS_ERR(dest_ref))
156 return PTR_ERR(dest_ref);
157 if (key_ref_to_ptr(dest_ref)->type != &key_type_keyring) {
158 ret = -ENOTDIR;
159 goto out_put_dest;
160 }
161
162 ret = key_get_persistent(ns, uid, dest_ref);
163
164out_put_dest:
165 key_ref_put(dest_ref);
166 return ret;
167}
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 217b6855e815..88e9a466940f 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -182,7 +182,6 @@ static void proc_keys_stop(struct seq_file *p, void *v)
182 182
183static int proc_keys_show(struct seq_file *m, void *v) 183static int proc_keys_show(struct seq_file *m, void *v)
184{ 184{
185 const struct cred *cred = current_cred();
186 struct rb_node *_p = v; 185 struct rb_node *_p = v;
187 struct key *key = rb_entry(_p, struct key, serial_node); 186 struct key *key = rb_entry(_p, struct key, serial_node);
188 struct timespec now; 187 struct timespec now;
@@ -191,15 +190,23 @@ static int proc_keys_show(struct seq_file *m, void *v)
191 char xbuf[12]; 190 char xbuf[12];
192 int rc; 191 int rc;
193 192
193 struct keyring_search_context ctx = {
194 .index_key.type = key->type,
195 .index_key.description = key->description,
196 .cred = current_cred(),
197 .match = lookup_user_key_possessed,
198 .match_data = key,
199 .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
200 KEYRING_SEARCH_LOOKUP_DIRECT),
201 };
202
194 key_ref = make_key_ref(key, 0); 203 key_ref = make_key_ref(key, 0);
195 204
196 /* determine if the key is possessed by this process (a test we can 205 /* determine if the key is possessed by this process (a test we can
197 * skip if the key does not indicate the possessor can view it 206 * skip if the key does not indicate the possessor can view it
198 */ 207 */
199 if (key->perm & KEY_POS_VIEW) { 208 if (key->perm & KEY_POS_VIEW) {
200 skey_ref = search_my_process_keyrings(key->type, key, 209 skey_ref = search_my_process_keyrings(&ctx);
201 lookup_user_key_possessed,
202 true, cred);
203 if (!IS_ERR(skey_ref)) { 210 if (!IS_ERR(skey_ref)) {
204 key_ref_put(skey_ref); 211 key_ref_put(skey_ref);
205 key_ref = make_key_ref(key, 1); 212 key_ref = make_key_ref(key, 1);
@@ -211,7 +218,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
211 * - the caller holds a spinlock, and thus the RCU read lock, making our 218 * - the caller holds a spinlock, and thus the RCU read lock, making our
212 * access to __current_cred() safe 219 * access to __current_cred() safe
213 */ 220 */
214 rc = key_task_permission(key_ref, cred, KEY_VIEW); 221 rc = key_task_permission(key_ref, ctx.cred, KEY_VIEW);
215 if (rc < 0) 222 if (rc < 0)
216 return 0; 223 return 0;
217 224
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 42defae1e161..0cf8a130a267 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -235,7 +235,7 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
235 if (IS_ERR(keyring)) 235 if (IS_ERR(keyring))
236 return PTR_ERR(keyring); 236 return PTR_ERR(keyring);
237 } else { 237 } else {
238 atomic_inc(&keyring->usage); 238 __key_get(keyring);
239 } 239 }
240 240
241 /* install the keyring */ 241 /* install the keyring */
@@ -319,11 +319,7 @@ void key_fsgid_changed(struct task_struct *tsk)
319 * In the case of a successful return, the possession attribute is set on the 319 * In the case of a successful return, the possession attribute is set on the
320 * returned key reference. 320 * returned key reference.
321 */ 321 */
322key_ref_t search_my_process_keyrings(struct key_type *type, 322key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
323 const void *description,
324 key_match_func_t match,
325 bool no_state_check,
326 const struct cred *cred)
327{ 323{
328 key_ref_t key_ref, ret, err; 324 key_ref_t key_ref, ret, err;
329 325
@@ -339,10 +335,9 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
339 err = ERR_PTR(-EAGAIN); 335 err = ERR_PTR(-EAGAIN);
340 336
341 /* search the thread keyring first */ 337 /* search the thread keyring first */
342 if (cred->thread_keyring) { 338 if (ctx->cred->thread_keyring) {
343 key_ref = keyring_search_aux( 339 key_ref = keyring_search_aux(
344 make_key_ref(cred->thread_keyring, 1), 340 make_key_ref(ctx->cred->thread_keyring, 1), ctx);
345 cred, type, description, match, no_state_check);
346 if (!IS_ERR(key_ref)) 341 if (!IS_ERR(key_ref))
347 goto found; 342 goto found;
348 343
@@ -358,10 +353,9 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
358 } 353 }
359 354
360 /* search the process keyring second */ 355 /* search the process keyring second */
361 if (cred->process_keyring) { 356 if (ctx->cred->process_keyring) {
362 key_ref = keyring_search_aux( 357 key_ref = keyring_search_aux(
363 make_key_ref(cred->process_keyring, 1), 358 make_key_ref(ctx->cred->process_keyring, 1), ctx);
364 cred, type, description, match, no_state_check);
365 if (!IS_ERR(key_ref)) 359 if (!IS_ERR(key_ref))
366 goto found; 360 goto found;
367 361
@@ -379,11 +373,11 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
379 } 373 }
380 374
381 /* search the session keyring */ 375 /* search the session keyring */
382 if (cred->session_keyring) { 376 if (ctx->cred->session_keyring) {
383 rcu_read_lock(); 377 rcu_read_lock();
384 key_ref = keyring_search_aux( 378 key_ref = keyring_search_aux(
385 make_key_ref(rcu_dereference(cred->session_keyring), 1), 379 make_key_ref(rcu_dereference(ctx->cred->session_keyring), 1),
386 cred, type, description, match, no_state_check); 380 ctx);
387 rcu_read_unlock(); 381 rcu_read_unlock();
388 382
389 if (!IS_ERR(key_ref)) 383 if (!IS_ERR(key_ref))
@@ -402,10 +396,10 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
402 } 396 }
403 } 397 }
404 /* or search the user-session keyring */ 398 /* or search the user-session keyring */
405 else if (cred->user->session_keyring) { 399 else if (ctx->cred->user->session_keyring) {
406 key_ref = keyring_search_aux( 400 key_ref = keyring_search_aux(
407 make_key_ref(cred->user->session_keyring, 1), 401 make_key_ref(ctx->cred->user->session_keyring, 1),
408 cred, type, description, match, no_state_check); 402 ctx);
409 if (!IS_ERR(key_ref)) 403 if (!IS_ERR(key_ref))
410 goto found; 404 goto found;
411 405
@@ -437,18 +431,14 @@ found:
437 * 431 *
438 * Return same as search_my_process_keyrings(). 432 * Return same as search_my_process_keyrings().
439 */ 433 */
440key_ref_t search_process_keyrings(struct key_type *type, 434key_ref_t search_process_keyrings(struct keyring_search_context *ctx)
441 const void *description,
442 key_match_func_t match,
443 const struct cred *cred)
444{ 435{
445 struct request_key_auth *rka; 436 struct request_key_auth *rka;
446 key_ref_t key_ref, ret = ERR_PTR(-EACCES), err; 437 key_ref_t key_ref, ret = ERR_PTR(-EACCES), err;
447 438
448 might_sleep(); 439 might_sleep();
449 440
450 key_ref = search_my_process_keyrings(type, description, match, 441 key_ref = search_my_process_keyrings(ctx);
451 false, cred);
452 if (!IS_ERR(key_ref)) 442 if (!IS_ERR(key_ref))
453 goto found; 443 goto found;
454 err = key_ref; 444 err = key_ref;
@@ -457,18 +447,21 @@ key_ref_t search_process_keyrings(struct key_type *type,
457 * search the keyrings of the process mentioned there 447 * search the keyrings of the process mentioned there
458 * - we don't permit access to request_key auth keys via this method 448 * - we don't permit access to request_key auth keys via this method
459 */ 449 */
460 if (cred->request_key_auth && 450 if (ctx->cred->request_key_auth &&
461 cred == current_cred() && 451 ctx->cred == current_cred() &&
462 type != &key_type_request_key_auth 452 ctx->index_key.type != &key_type_request_key_auth
463 ) { 453 ) {
454 const struct cred *cred = ctx->cred;
455
464 /* defend against the auth key being revoked */ 456 /* defend against the auth key being revoked */
465 down_read(&cred->request_key_auth->sem); 457 down_read(&cred->request_key_auth->sem);
466 458
467 if (key_validate(cred->request_key_auth) == 0) { 459 if (key_validate(ctx->cred->request_key_auth) == 0) {
468 rka = cred->request_key_auth->payload.data; 460 rka = ctx->cred->request_key_auth->payload.data;
469 461
470 key_ref = search_process_keyrings(type, description, 462 ctx->cred = rka->cred;
471 match, rka->cred); 463 key_ref = search_process_keyrings(ctx);
464 ctx->cred = cred;
472 465
473 up_read(&cred->request_key_auth->sem); 466 up_read(&cred->request_key_auth->sem);
474 467
@@ -522,19 +515,23 @@ int lookup_user_key_possessed(const struct key *key, const void *target)
522key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags, 515key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
523 key_perm_t perm) 516 key_perm_t perm)
524{ 517{
518 struct keyring_search_context ctx = {
519 .match = lookup_user_key_possessed,
520 .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
521 KEYRING_SEARCH_LOOKUP_DIRECT),
522 };
525 struct request_key_auth *rka; 523 struct request_key_auth *rka;
526 const struct cred *cred;
527 struct key *key; 524 struct key *key;
528 key_ref_t key_ref, skey_ref; 525 key_ref_t key_ref, skey_ref;
529 int ret; 526 int ret;
530 527
531try_again: 528try_again:
532 cred = get_current_cred(); 529 ctx.cred = get_current_cred();
533 key_ref = ERR_PTR(-ENOKEY); 530 key_ref = ERR_PTR(-ENOKEY);
534 531
535 switch (id) { 532 switch (id) {
536 case KEY_SPEC_THREAD_KEYRING: 533 case KEY_SPEC_THREAD_KEYRING:
537 if (!cred->thread_keyring) { 534 if (!ctx.cred->thread_keyring) {
538 if (!(lflags & KEY_LOOKUP_CREATE)) 535 if (!(lflags & KEY_LOOKUP_CREATE))
539 goto error; 536 goto error;
540 537
@@ -546,13 +543,13 @@ try_again:
546 goto reget_creds; 543 goto reget_creds;
547 } 544 }
548 545
549 key = cred->thread_keyring; 546 key = ctx.cred->thread_keyring;
550 atomic_inc(&key->usage); 547 __key_get(key);
551 key_ref = make_key_ref(key, 1); 548 key_ref = make_key_ref(key, 1);
552 break; 549 break;
553 550
554 case KEY_SPEC_PROCESS_KEYRING: 551 case KEY_SPEC_PROCESS_KEYRING:
555 if (!cred->process_keyring) { 552 if (!ctx.cred->process_keyring) {
556 if (!(lflags & KEY_LOOKUP_CREATE)) 553 if (!(lflags & KEY_LOOKUP_CREATE))
557 goto error; 554 goto error;
558 555
@@ -564,13 +561,13 @@ try_again:
564 goto reget_creds; 561 goto reget_creds;
565 } 562 }
566 563
567 key = cred->process_keyring; 564 key = ctx.cred->process_keyring;
568 atomic_inc(&key->usage); 565 __key_get(key);
569 key_ref = make_key_ref(key, 1); 566 key_ref = make_key_ref(key, 1);
570 break; 567 break;
571 568
572 case KEY_SPEC_SESSION_KEYRING: 569 case KEY_SPEC_SESSION_KEYRING:
573 if (!cred->session_keyring) { 570 if (!ctx.cred->session_keyring) {
574 /* always install a session keyring upon access if one 571 /* always install a session keyring upon access if one
575 * doesn't exist yet */ 572 * doesn't exist yet */
576 ret = install_user_keyrings(); 573 ret = install_user_keyrings();
@@ -580,13 +577,13 @@ try_again:
580 ret = join_session_keyring(NULL); 577 ret = join_session_keyring(NULL);
581 else 578 else
582 ret = install_session_keyring( 579 ret = install_session_keyring(
583 cred->user->session_keyring); 580 ctx.cred->user->session_keyring);
584 581
585 if (ret < 0) 582 if (ret < 0)
586 goto error; 583 goto error;
587 goto reget_creds; 584 goto reget_creds;
588 } else if (cred->session_keyring == 585 } else if (ctx.cred->session_keyring ==
589 cred->user->session_keyring && 586 ctx.cred->user->session_keyring &&
590 lflags & KEY_LOOKUP_CREATE) { 587 lflags & KEY_LOOKUP_CREATE) {
591 ret = join_session_keyring(NULL); 588 ret = join_session_keyring(NULL);
592 if (ret < 0) 589 if (ret < 0)
@@ -595,33 +592,33 @@ try_again:
595 } 592 }
596 593
597 rcu_read_lock(); 594 rcu_read_lock();
598 key = rcu_dereference(cred->session_keyring); 595 key = rcu_dereference(ctx.cred->session_keyring);
599 atomic_inc(&key->usage); 596 __key_get(key);
600 rcu_read_unlock(); 597 rcu_read_unlock();
601 key_ref = make_key_ref(key, 1); 598 key_ref = make_key_ref(key, 1);
602 break; 599 break;
603 600
604 case KEY_SPEC_USER_KEYRING: 601 case KEY_SPEC_USER_KEYRING:
605 if (!cred->user->uid_keyring) { 602 if (!ctx.cred->user->uid_keyring) {
606 ret = install_user_keyrings(); 603 ret = install_user_keyrings();
607 if (ret < 0) 604 if (ret < 0)
608 goto error; 605 goto error;
609 } 606 }
610 607
611 key = cred->user->uid_keyring; 608 key = ctx.cred->user->uid_keyring;
612 atomic_inc(&key->usage); 609 __key_get(key);
613 key_ref = make_key_ref(key, 1); 610 key_ref = make_key_ref(key, 1);
614 break; 611 break;
615 612
616 case KEY_SPEC_USER_SESSION_KEYRING: 613 case KEY_SPEC_USER_SESSION_KEYRING:
617 if (!cred->user->session_keyring) { 614 if (!ctx.cred->user->session_keyring) {
618 ret = install_user_keyrings(); 615 ret = install_user_keyrings();
619 if (ret < 0) 616 if (ret < 0)
620 goto error; 617 goto error;
621 } 618 }
622 619
623 key = cred->user->session_keyring; 620 key = ctx.cred->user->session_keyring;
624 atomic_inc(&key->usage); 621 __key_get(key);
625 key_ref = make_key_ref(key, 1); 622 key_ref = make_key_ref(key, 1);
626 break; 623 break;
627 624
@@ -631,29 +628,29 @@ try_again:
631 goto error; 628 goto error;
632 629
633 case KEY_SPEC_REQKEY_AUTH_KEY: 630 case KEY_SPEC_REQKEY_AUTH_KEY:
634 key = cred->request_key_auth; 631 key = ctx.cred->request_key_auth;
635 if (!key) 632 if (!key)
636 goto error; 633 goto error;
637 634
638 atomic_inc(&key->usage); 635 __key_get(key);
639 key_ref = make_key_ref(key, 1); 636 key_ref = make_key_ref(key, 1);
640 break; 637 break;
641 638
642 case KEY_SPEC_REQUESTOR_KEYRING: 639 case KEY_SPEC_REQUESTOR_KEYRING:
643 if (!cred->request_key_auth) 640 if (!ctx.cred->request_key_auth)
644 goto error; 641 goto error;
645 642
646 down_read(&cred->request_key_auth->sem); 643 down_read(&ctx.cred->request_key_auth->sem);
647 if (test_bit(KEY_FLAG_REVOKED, 644 if (test_bit(KEY_FLAG_REVOKED,
648 &cred->request_key_auth->flags)) { 645 &ctx.cred->request_key_auth->flags)) {
649 key_ref = ERR_PTR(-EKEYREVOKED); 646 key_ref = ERR_PTR(-EKEYREVOKED);
650 key = NULL; 647 key = NULL;
651 } else { 648 } else {
652 rka = cred->request_key_auth->payload.data; 649 rka = ctx.cred->request_key_auth->payload.data;
653 key = rka->dest_keyring; 650 key = rka->dest_keyring;
654 atomic_inc(&key->usage); 651 __key_get(key);
655 } 652 }
656 up_read(&cred->request_key_auth->sem); 653 up_read(&ctx.cred->request_key_auth->sem);
657 if (!key) 654 if (!key)
658 goto error; 655 goto error;
659 key_ref = make_key_ref(key, 1); 656 key_ref = make_key_ref(key, 1);
@@ -673,9 +670,13 @@ try_again:
673 key_ref = make_key_ref(key, 0); 670 key_ref = make_key_ref(key, 0);
674 671
675 /* check to see if we possess the key */ 672 /* check to see if we possess the key */
676 skey_ref = search_process_keyrings(key->type, key, 673 ctx.index_key.type = key->type;
677 lookup_user_key_possessed, 674 ctx.index_key.description = key->description;
678 cred); 675 ctx.index_key.desc_len = strlen(key->description);
676 ctx.match_data = key;
677 kdebug("check possessed");
678 skey_ref = search_process_keyrings(&ctx);
679 kdebug("possessed=%p", skey_ref);
679 680
680 if (!IS_ERR(skey_ref)) { 681 if (!IS_ERR(skey_ref)) {
681 key_put(key); 682 key_put(key);
@@ -715,14 +716,14 @@ try_again:
715 goto invalid_key; 716 goto invalid_key;
716 717
717 /* check the permissions */ 718 /* check the permissions */
718 ret = key_task_permission(key_ref, cred, perm); 719 ret = key_task_permission(key_ref, ctx.cred, perm);
719 if (ret < 0) 720 if (ret < 0)
720 goto invalid_key; 721 goto invalid_key;
721 722
722 key->last_used_at = current_kernel_time().tv_sec; 723 key->last_used_at = current_kernel_time().tv_sec;
723 724
724error: 725error:
725 put_cred(cred); 726 put_cred(ctx.cred);
726 return key_ref; 727 return key_ref;
727 728
728invalid_key: 729invalid_key:
@@ -733,7 +734,7 @@ invalid_key:
733 /* if we attempted to install a keyring, then it may have caused new 734 /* if we attempted to install a keyring, then it may have caused new
734 * creds to be installed */ 735 * creds to be installed */
735reget_creds: 736reget_creds:
736 put_cred(cred); 737 put_cred(ctx.cred);
737 goto try_again; 738 goto try_again;
738} 739}
739 740
@@ -856,3 +857,13 @@ void key_change_session_keyring(struct callback_head *twork)
856 857
857 commit_creds(new); 858 commit_creds(new);
858} 859}
860
861/*
862 * Make sure that root's user and user-session keyrings exist.
863 */
864static int __init init_root_keyring(void)
865{
866 return install_user_keyrings();
867}
868
869late_initcall(init_root_keyring);
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index c411f9bb156b..381411941cc1 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -345,33 +345,34 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
345 * May return a key that's already under construction instead if there was a 345 * May return a key that's already under construction instead if there was a
346 * race between two thread calling request_key(). 346 * race between two thread calling request_key().
347 */ 347 */
348static int construct_alloc_key(struct key_type *type, 348static int construct_alloc_key(struct keyring_search_context *ctx,
349 const char *description,
350 struct key *dest_keyring, 349 struct key *dest_keyring,
351 unsigned long flags, 350 unsigned long flags,
352 struct key_user *user, 351 struct key_user *user,
353 struct key **_key) 352 struct key **_key)
354{ 353{
355 const struct cred *cred = current_cred(); 354 struct assoc_array_edit *edit;
356 unsigned long prealloc;
357 struct key *key; 355 struct key *key;
358 key_perm_t perm; 356 key_perm_t perm;
359 key_ref_t key_ref; 357 key_ref_t key_ref;
360 int ret; 358 int ret;
361 359
362 kenter("%s,%s,,,", type->name, description); 360 kenter("%s,%s,,,",
361 ctx->index_key.type->name, ctx->index_key.description);
363 362
364 *_key = NULL; 363 *_key = NULL;
365 mutex_lock(&user->cons_lock); 364 mutex_lock(&user->cons_lock);
366 365
367 perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; 366 perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
368 perm |= KEY_USR_VIEW; 367 perm |= KEY_USR_VIEW;
369 if (type->read) 368 if (ctx->index_key.type->read)
370 perm |= KEY_POS_READ; 369 perm |= KEY_POS_READ;
371 if (type == &key_type_keyring || type->update) 370 if (ctx->index_key.type == &key_type_keyring ||
371 ctx->index_key.type->update)
372 perm |= KEY_POS_WRITE; 372 perm |= KEY_POS_WRITE;
373 373
374 key = key_alloc(type, description, cred->fsuid, cred->fsgid, cred, 374 key = key_alloc(ctx->index_key.type, ctx->index_key.description,
375 ctx->cred->fsuid, ctx->cred->fsgid, ctx->cred,
375 perm, flags); 376 perm, flags);
376 if (IS_ERR(key)) 377 if (IS_ERR(key))
377 goto alloc_failed; 378 goto alloc_failed;
@@ -379,8 +380,7 @@ static int construct_alloc_key(struct key_type *type,
379 set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags); 380 set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags);
380 381
381 if (dest_keyring) { 382 if (dest_keyring) {
382 ret = __key_link_begin(dest_keyring, type, description, 383 ret = __key_link_begin(dest_keyring, &ctx->index_key, &edit);
383 &prealloc);
384 if (ret < 0) 384 if (ret < 0)
385 goto link_prealloc_failed; 385 goto link_prealloc_failed;
386 } 386 }
@@ -390,16 +390,16 @@ static int construct_alloc_key(struct key_type *type,
390 * waited for locks */ 390 * waited for locks */
391 mutex_lock(&key_construction_mutex); 391 mutex_lock(&key_construction_mutex);
392 392
393 key_ref = search_process_keyrings(type, description, type->match, cred); 393 key_ref = search_process_keyrings(ctx);
394 if (!IS_ERR(key_ref)) 394 if (!IS_ERR(key_ref))
395 goto key_already_present; 395 goto key_already_present;
396 396
397 if (dest_keyring) 397 if (dest_keyring)
398 __key_link(dest_keyring, key, &prealloc); 398 __key_link(key, &edit);
399 399
400 mutex_unlock(&key_construction_mutex); 400 mutex_unlock(&key_construction_mutex);
401 if (dest_keyring) 401 if (dest_keyring)
402 __key_link_end(dest_keyring, type, prealloc); 402 __key_link_end(dest_keyring, &ctx->index_key, edit);
403 mutex_unlock(&user->cons_lock); 403 mutex_unlock(&user->cons_lock);
404 *_key = key; 404 *_key = key;
405 kleave(" = 0 [%d]", key_serial(key)); 405 kleave(" = 0 [%d]", key_serial(key));
@@ -414,8 +414,8 @@ key_already_present:
414 if (dest_keyring) { 414 if (dest_keyring) {
415 ret = __key_link_check_live_key(dest_keyring, key); 415 ret = __key_link_check_live_key(dest_keyring, key);
416 if (ret == 0) 416 if (ret == 0)
417 __key_link(dest_keyring, key, &prealloc); 417 __key_link(key, &edit);
418 __key_link_end(dest_keyring, type, prealloc); 418 __key_link_end(dest_keyring, &ctx->index_key, edit);
419 if (ret < 0) 419 if (ret < 0)
420 goto link_check_failed; 420 goto link_check_failed;
421 } 421 }
@@ -444,8 +444,7 @@ alloc_failed:
444/* 444/*
445 * Commence key construction. 445 * Commence key construction.
446 */ 446 */
447static struct key *construct_key_and_link(struct key_type *type, 447static struct key *construct_key_and_link(struct keyring_search_context *ctx,
448 const char *description,
449 const char *callout_info, 448 const char *callout_info,
450 size_t callout_len, 449 size_t callout_len,
451 void *aux, 450 void *aux,
@@ -464,8 +463,7 @@ static struct key *construct_key_and_link(struct key_type *type,
464 463
465 construct_get_dest_keyring(&dest_keyring); 464 construct_get_dest_keyring(&dest_keyring);
466 465
467 ret = construct_alloc_key(type, description, dest_keyring, flags, user, 466 ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
468 &key);
469 key_user_put(user); 467 key_user_put(user);
470 468
471 if (ret == 0) { 469 if (ret == 0) {
@@ -529,17 +527,24 @@ struct key *request_key_and_link(struct key_type *type,
529 struct key *dest_keyring, 527 struct key *dest_keyring,
530 unsigned long flags) 528 unsigned long flags)
531{ 529{
532 const struct cred *cred = current_cred(); 530 struct keyring_search_context ctx = {
531 .index_key.type = type,
532 .index_key.description = description,
533 .cred = current_cred(),
534 .match = type->match,
535 .match_data = description,
536 .flags = KEYRING_SEARCH_LOOKUP_DIRECT,
537 };
533 struct key *key; 538 struct key *key;
534 key_ref_t key_ref; 539 key_ref_t key_ref;
535 int ret; 540 int ret;
536 541
537 kenter("%s,%s,%p,%zu,%p,%p,%lx", 542 kenter("%s,%s,%p,%zu,%p,%p,%lx",
538 type->name, description, callout_info, callout_len, aux, 543 ctx.index_key.type->name, ctx.index_key.description,
539 dest_keyring, flags); 544 callout_info, callout_len, aux, dest_keyring, flags);
540 545
541 /* search all the process keyrings for a key */ 546 /* search all the process keyrings for a key */
542 key_ref = search_process_keyrings(type, description, type->match, cred); 547 key_ref = search_process_keyrings(&ctx);
543 548
544 if (!IS_ERR(key_ref)) { 549 if (!IS_ERR(key_ref)) {
545 key = key_ref_to_ptr(key_ref); 550 key = key_ref_to_ptr(key_ref);
@@ -562,9 +567,8 @@ struct key *request_key_and_link(struct key_type *type,
562 if (!callout_info) 567 if (!callout_info)
563 goto error; 568 goto error;
564 569
565 key = construct_key_and_link(type, description, callout_info, 570 key = construct_key_and_link(&ctx, callout_info, callout_len,
566 callout_len, aux, dest_keyring, 571 aux, dest_keyring, flags);
567 flags);
568 } 572 }
569 573
570error: 574error:
@@ -592,8 +596,10 @@ int wait_for_key_construction(struct key *key, bool intr)
592 intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); 596 intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
593 if (ret < 0) 597 if (ret < 0)
594 return ret; 598 return ret;
595 if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) 599 if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
600 smp_rmb();
596 return key->type_data.reject_error; 601 return key->type_data.reject_error;
602 }
597 return key_validate(key); 603 return key_validate(key);
598} 604}
599EXPORT_SYMBOL(wait_for_key_construction); 605EXPORT_SYMBOL(wait_for_key_construction);
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 85730d5a5a59..7495a93b4b90 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -18,6 +18,7 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <asm/uaccess.h> 19#include <asm/uaccess.h>
20#include "internal.h" 20#include "internal.h"
21#include <keys/user-type.h>
21 22
22static int request_key_auth_instantiate(struct key *, 23static int request_key_auth_instantiate(struct key *,
23 struct key_preparsed_payload *); 24 struct key_preparsed_payload *);
@@ -222,32 +223,26 @@ error_alloc:
222} 223}
223 224
224/* 225/*
225 * See if an authorisation key is associated with a particular key.
226 */
227static int key_get_instantiation_authkey_match(const struct key *key,
228 const void *_id)
229{
230 struct request_key_auth *rka = key->payload.data;
231 key_serial_t id = (key_serial_t)(unsigned long) _id;
232
233 return rka->target_key->serial == id;
234}
235
236/*
237 * Search the current process's keyrings for the authorisation key for 226 * Search the current process's keyrings for the authorisation key for
238 * instantiation of a key. 227 * instantiation of a key.
239 */ 228 */
240struct key *key_get_instantiation_authkey(key_serial_t target_id) 229struct key *key_get_instantiation_authkey(key_serial_t target_id)
241{ 230{
242 const struct cred *cred = current_cred(); 231 char description[16];
232 struct keyring_search_context ctx = {
233 .index_key.type = &key_type_request_key_auth,
234 .index_key.description = description,
235 .cred = current_cred(),
236 .match = user_match,
237 .match_data = description,
238 .flags = KEYRING_SEARCH_LOOKUP_DIRECT,
239 };
243 struct key *authkey; 240 struct key *authkey;
244 key_ref_t authkey_ref; 241 key_ref_t authkey_ref;
245 242
246 authkey_ref = search_process_keyrings( 243 sprintf(description, "%x", target_id);
247 &key_type_request_key_auth, 244
248 (void *) (unsigned long) target_id, 245 authkey_ref = search_process_keyrings(&ctx);
249 key_get_instantiation_authkey_match,
250 cred);
251 246
252 if (IS_ERR(authkey_ref)) { 247 if (IS_ERR(authkey_ref)) {
253 authkey = ERR_CAST(authkey_ref); 248 authkey = ERR_CAST(authkey_ref);
diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c
index ee32d181764a..8c0af08760c8 100644
--- a/security/keys/sysctl.c
+++ b/security/keys/sysctl.c
@@ -61,5 +61,16 @@ ctl_table key_sysctls[] = {
61 .extra1 = (void *) &zero, 61 .extra1 = (void *) &zero,
62 .extra2 = (void *) &max, 62 .extra2 = (void *) &max,
63 }, 63 },
64#ifdef CONFIG_PERSISTENT_KEYRINGS
65 {
66 .procname = "persistent_keyring_expiry",
67 .data = &persistent_keyring_expiry,
68 .maxlen = sizeof(unsigned),
69 .mode = 0644,
70 .proc_handler = proc_dointvec_minmax,
71 .extra1 = (void *) &zero,
72 .extra2 = (void *) &max,
73 },
74#endif
64 { } 75 { }
65}; 76};
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 55dc88939185..faa2caeb593f 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -25,14 +25,15 @@ static int logon_vet_description(const char *desc);
25 * arbitrary blob of data as the payload 25 * arbitrary blob of data as the payload
26 */ 26 */
27struct key_type key_type_user = { 27struct key_type key_type_user = {
28 .name = "user", 28 .name = "user",
29 .instantiate = user_instantiate, 29 .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
30 .update = user_update, 30 .instantiate = user_instantiate,
31 .match = user_match, 31 .update = user_update,
32 .revoke = user_revoke, 32 .match = user_match,
33 .destroy = user_destroy, 33 .revoke = user_revoke,
34 .describe = user_describe, 34 .destroy = user_destroy,
35 .read = user_read, 35 .describe = user_describe,
36 .read = user_read,
36}; 37};
37 38
38EXPORT_SYMBOL_GPL(key_type_user); 39EXPORT_SYMBOL_GPL(key_type_user);
@@ -45,6 +46,7 @@ EXPORT_SYMBOL_GPL(key_type_user);
45 */ 46 */
46struct key_type key_type_logon = { 47struct key_type key_type_logon = {
47 .name = "logon", 48 .name = "logon",
49 .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
48 .instantiate = user_instantiate, 50 .instantiate = user_instantiate,
49 .update = user_update, 51 .update = user_update,
50 .match = user_match, 52 .match = user_match,
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 234bc2ab450c..9a62045e6282 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -397,7 +397,8 @@ void common_lsm_audit(struct common_audit_data *a,
397 if (a == NULL) 397 if (a == NULL)
398 return; 398 return;
399 /* we use GFP_ATOMIC so we won't sleep */ 399 /* we use GFP_ATOMIC so we won't sleep */
400 ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_AVC); 400 ab = audit_log_start(current->audit_context, GFP_ATOMIC | __GFP_NOWARN,
401 AUDIT_AVC);
401 402
402 if (ab == NULL) 403 if (ab == NULL)
403 return; 404 return;
diff --git a/security/security.c b/security/security.c
index 4dc31f4f2700..15b6928592ef 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1340,22 +1340,17 @@ int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
1340 return security_ops->xfrm_policy_delete_security(ctx); 1340 return security_ops->xfrm_policy_delete_security(ctx);
1341} 1341}
1342 1342
1343int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx) 1343int security_xfrm_state_alloc(struct xfrm_state *x,
1344 struct xfrm_user_sec_ctx *sec_ctx)
1344{ 1345{
1345 return security_ops->xfrm_state_alloc_security(x, sec_ctx, 0); 1346 return security_ops->xfrm_state_alloc(x, sec_ctx);
1346} 1347}
1347EXPORT_SYMBOL(security_xfrm_state_alloc); 1348EXPORT_SYMBOL(security_xfrm_state_alloc);
1348 1349
1349int security_xfrm_state_alloc_acquire(struct xfrm_state *x, 1350int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
1350 struct xfrm_sec_ctx *polsec, u32 secid) 1351 struct xfrm_sec_ctx *polsec, u32 secid)
1351{ 1352{
1352 if (!polsec) 1353 return security_ops->xfrm_state_alloc_acquire(x, polsec, secid);
1353 return 0;
1354 /*
1355 * We want the context to be taken from secid which is usually
1356 * from the sock.
1357 */
1358 return security_ops->xfrm_state_alloc_security(x, NULL, secid);
1359} 1354}
1360 1355
1361int security_xfrm_state_delete(struct xfrm_state *x) 1356int security_xfrm_state_delete(struct xfrm_state *x)
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index c540795fb3f2..419491d8e7d2 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -53,6 +53,7 @@
53#include <net/ip.h> /* for local_port_range[] */ 53#include <net/ip.h> /* for local_port_range[] */
54#include <net/sock.h> 54#include <net/sock.h>
55#include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */ 55#include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */
56#include <net/inet_connection_sock.h>
56#include <net/net_namespace.h> 57#include <net/net_namespace.h>
57#include <net/netlabel.h> 58#include <net/netlabel.h>
58#include <linux/uaccess.h> 59#include <linux/uaccess.h>
@@ -95,8 +96,6 @@
95#include "audit.h" 96#include "audit.h"
96#include "avc_ss.h" 97#include "avc_ss.h"
97 98
98#define NUM_SEL_MNT_OPTS 5
99
100extern struct security_operations *security_ops; 99extern struct security_operations *security_ops;
101 100
102/* SECMARK reference count */ 101/* SECMARK reference count */
@@ -139,12 +138,28 @@ static struct kmem_cache *sel_inode_cache;
139 * This function checks the SECMARK reference counter to see if any SECMARK 138 * This function checks the SECMARK reference counter to see if any SECMARK
140 * targets are currently configured, if the reference counter is greater than 139 * targets are currently configured, if the reference counter is greater than
141 * zero SECMARK is considered to be enabled. Returns true (1) if SECMARK is 140 * zero SECMARK is considered to be enabled. Returns true (1) if SECMARK is
142 * enabled, false (0) if SECMARK is disabled. 141 * enabled, false (0) if SECMARK is disabled. If the always_check_network
142 * policy capability is enabled, SECMARK is always considered enabled.
143 * 143 *
144 */ 144 */
145static int selinux_secmark_enabled(void) 145static int selinux_secmark_enabled(void)
146{ 146{
147 return (atomic_read(&selinux_secmark_refcount) > 0); 147 return (selinux_policycap_alwaysnetwork || atomic_read(&selinux_secmark_refcount));
148}
149
150/**
151 * selinux_peerlbl_enabled - Check to see if peer labeling is currently enabled
152 *
153 * Description:
154 * This function checks if NetLabel or labeled IPSEC is enabled. Returns true
155 * (1) if any are enabled or false (0) if neither are enabled. If the
156 * always_check_network policy capability is enabled, peer labeling
157 * is always considered enabled.
158 *
159 */
160static int selinux_peerlbl_enabled(void)
161{
162 return (selinux_policycap_alwaysnetwork || netlbl_enabled() || selinux_xfrm_enabled());
148} 163}
149 164
150/* 165/*
@@ -309,8 +324,11 @@ enum {
309 Opt_defcontext = 3, 324 Opt_defcontext = 3,
310 Opt_rootcontext = 4, 325 Opt_rootcontext = 4,
311 Opt_labelsupport = 5, 326 Opt_labelsupport = 5,
327 Opt_nextmntopt = 6,
312}; 328};
313 329
330#define NUM_SEL_MNT_OPTS (Opt_nextmntopt - 1)
331
314static const match_table_t tokens = { 332static const match_table_t tokens = {
315 {Opt_context, CONTEXT_STR "%s"}, 333 {Opt_context, CONTEXT_STR "%s"},
316 {Opt_fscontext, FSCONTEXT_STR "%s"}, 334 {Opt_fscontext, FSCONTEXT_STR "%s"},
@@ -355,6 +373,29 @@ static int may_context_mount_inode_relabel(u32 sid,
355 return rc; 373 return rc;
356} 374}
357 375
376static int selinux_is_sblabel_mnt(struct super_block *sb)
377{
378 struct superblock_security_struct *sbsec = sb->s_security;
379
380 if (sbsec->behavior == SECURITY_FS_USE_XATTR ||
381 sbsec->behavior == SECURITY_FS_USE_TRANS ||
382 sbsec->behavior == SECURITY_FS_USE_TASK)
383 return 1;
384
385 /* Special handling for sysfs. Is genfs but also has setxattr handler*/
386 if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0)
387 return 1;
388
389 /*
390 * Special handling for rootfs. Is genfs but supports
391 * setting SELinux context on in-core inodes.
392 */
393 if (strncmp(sb->s_type->name, "rootfs", sizeof("rootfs")) == 0)
394 return 1;
395
396 return 0;
397}
398
358static int sb_finish_set_opts(struct super_block *sb) 399static int sb_finish_set_opts(struct super_block *sb)
359{ 400{
360 struct superblock_security_struct *sbsec = sb->s_security; 401 struct superblock_security_struct *sbsec = sb->s_security;
@@ -388,8 +429,6 @@ static int sb_finish_set_opts(struct super_block *sb)
388 } 429 }
389 } 430 }
390 431
391 sbsec->flags |= (SE_SBINITIALIZED | SE_SBLABELSUPP);
392
393 if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors)) 432 if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
394 printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n", 433 printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n",
395 sb->s_id, sb->s_type->name); 434 sb->s_id, sb->s_type->name);
@@ -398,15 +437,9 @@ static int sb_finish_set_opts(struct super_block *sb)
398 sb->s_id, sb->s_type->name, 437 sb->s_id, sb->s_type->name,
399 labeling_behaviors[sbsec->behavior-1]); 438 labeling_behaviors[sbsec->behavior-1]);
400 439
401 if (sbsec->behavior == SECURITY_FS_USE_GENFS || 440 sbsec->flags |= SE_SBINITIALIZED;
402 sbsec->behavior == SECURITY_FS_USE_MNTPOINT || 441 if (selinux_is_sblabel_mnt(sb))
403 sbsec->behavior == SECURITY_FS_USE_NONE || 442 sbsec->flags |= SBLABEL_MNT;
404 sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
405 sbsec->flags &= ~SE_SBLABELSUPP;
406
407 /* Special handling for sysfs. Is genfs but also has setxattr handler*/
408 if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0)
409 sbsec->flags |= SE_SBLABELSUPP;
410 443
411 /* Initialize the root inode. */ 444 /* Initialize the root inode. */
412 rc = inode_doinit_with_dentry(root_inode, root); 445 rc = inode_doinit_with_dentry(root_inode, root);
@@ -460,15 +493,18 @@ static int selinux_get_mnt_opts(const struct super_block *sb,
460 if (!ss_initialized) 493 if (!ss_initialized)
461 return -EINVAL; 494 return -EINVAL;
462 495
496 /* make sure we always check enough bits to cover the mask */
497 BUILD_BUG_ON(SE_MNTMASK >= (1 << NUM_SEL_MNT_OPTS));
498
463 tmp = sbsec->flags & SE_MNTMASK; 499 tmp = sbsec->flags & SE_MNTMASK;
464 /* count the number of mount options for this sb */ 500 /* count the number of mount options for this sb */
465 for (i = 0; i < 8; i++) { 501 for (i = 0; i < NUM_SEL_MNT_OPTS; i++) {
466 if (tmp & 0x01) 502 if (tmp & 0x01)
467 opts->num_mnt_opts++; 503 opts->num_mnt_opts++;
468 tmp >>= 1; 504 tmp >>= 1;
469 } 505 }
470 /* Check if the Label support flag is set */ 506 /* Check if the Label support flag is set */
471 if (sbsec->flags & SE_SBLABELSUPP) 507 if (sbsec->flags & SBLABEL_MNT)
472 opts->num_mnt_opts++; 508 opts->num_mnt_opts++;
473 509
474 opts->mnt_opts = kcalloc(opts->num_mnt_opts, sizeof(char *), GFP_ATOMIC); 510 opts->mnt_opts = kcalloc(opts->num_mnt_opts, sizeof(char *), GFP_ATOMIC);
@@ -515,9 +551,9 @@ static int selinux_get_mnt_opts(const struct super_block *sb,
515 opts->mnt_opts[i] = context; 551 opts->mnt_opts[i] = context;
516 opts->mnt_opts_flags[i++] = ROOTCONTEXT_MNT; 552 opts->mnt_opts_flags[i++] = ROOTCONTEXT_MNT;
517 } 553 }
518 if (sbsec->flags & SE_SBLABELSUPP) { 554 if (sbsec->flags & SBLABEL_MNT) {
519 opts->mnt_opts[i] = NULL; 555 opts->mnt_opts[i] = NULL;
520 opts->mnt_opts_flags[i++] = SE_SBLABELSUPP; 556 opts->mnt_opts_flags[i++] = SBLABEL_MNT;
521 } 557 }
522 558
523 BUG_ON(i != opts->num_mnt_opts); 559 BUG_ON(i != opts->num_mnt_opts);
@@ -614,7 +650,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
614 for (i = 0; i < num_opts; i++) { 650 for (i = 0; i < num_opts; i++) {
615 u32 sid; 651 u32 sid;
616 652
617 if (flags[i] == SE_SBLABELSUPP) 653 if (flags[i] == SBLABEL_MNT)
618 continue; 654 continue;
619 rc = security_context_to_sid(mount_options[i], 655 rc = security_context_to_sid(mount_options[i],
620 strlen(mount_options[i]), &sid); 656 strlen(mount_options[i]), &sid);
@@ -685,9 +721,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
685 * Determine the labeling behavior to use for this 721 * Determine the labeling behavior to use for this
686 * filesystem type. 722 * filesystem type.
687 */ 723 */
688 rc = security_fs_use((sbsec->flags & SE_SBPROC) ? 724 rc = security_fs_use(sb);
689 "proc" : sb->s_type->name,
690 &sbsec->behavior, &sbsec->sid);
691 if (rc) { 725 if (rc) {
692 printk(KERN_WARNING 726 printk(KERN_WARNING
693 "%s: security_fs_use(%s) returned %d\n", 727 "%s: security_fs_use(%s) returned %d\n",
@@ -1037,7 +1071,7 @@ static void selinux_write_opts(struct seq_file *m,
1037 case DEFCONTEXT_MNT: 1071 case DEFCONTEXT_MNT:
1038 prefix = DEFCONTEXT_STR; 1072 prefix = DEFCONTEXT_STR;
1039 break; 1073 break;
1040 case SE_SBLABELSUPP: 1074 case SBLABEL_MNT:
1041 seq_putc(m, ','); 1075 seq_putc(m, ',');
1042 seq_puts(m, LABELSUPP_STR); 1076 seq_puts(m, LABELSUPP_STR);
1043 continue; 1077 continue;
@@ -1649,7 +1683,7 @@ static int may_create(struct inode *dir,
1649 if (rc) 1683 if (rc)
1650 return rc; 1684 return rc;
1651 1685
1652 if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) { 1686 if (!newsid || !(sbsec->flags & SBLABEL_MNT)) {
1653 rc = security_transition_sid(sid, dsec->sid, tclass, 1687 rc = security_transition_sid(sid, dsec->sid, tclass,
1654 &dentry->d_name, &newsid); 1688 &dentry->d_name, &newsid);
1655 if (rc) 1689 if (rc)
@@ -2437,7 +2471,7 @@ static int selinux_sb_remount(struct super_block *sb, void *data)
2437 u32 sid; 2471 u32 sid;
2438 size_t len; 2472 size_t len;
2439 2473
2440 if (flags[i] == SE_SBLABELSUPP) 2474 if (flags[i] == SBLABEL_MNT)
2441 continue; 2475 continue;
2442 len = strlen(mount_options[i]); 2476 len = strlen(mount_options[i]);
2443 rc = security_context_to_sid(mount_options[i], len, &sid); 2477 rc = security_context_to_sid(mount_options[i], len, &sid);
@@ -2606,7 +2640,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
2606 if ((sbsec->flags & SE_SBINITIALIZED) && 2640 if ((sbsec->flags & SE_SBINITIALIZED) &&
2607 (sbsec->behavior == SECURITY_FS_USE_MNTPOINT)) 2641 (sbsec->behavior == SECURITY_FS_USE_MNTPOINT))
2608 newsid = sbsec->mntpoint_sid; 2642 newsid = sbsec->mntpoint_sid;
2609 else if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) { 2643 else if (!newsid || !(sbsec->flags & SBLABEL_MNT)) {
2610 rc = security_transition_sid(sid, dsec->sid, 2644 rc = security_transition_sid(sid, dsec->sid,
2611 inode_mode_to_security_class(inode->i_mode), 2645 inode_mode_to_security_class(inode->i_mode),
2612 qstr, &newsid); 2646 qstr, &newsid);
@@ -2628,7 +2662,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
2628 isec->initialized = 1; 2662 isec->initialized = 1;
2629 } 2663 }
2630 2664
2631 if (!ss_initialized || !(sbsec->flags & SE_SBLABELSUPP)) 2665 if (!ss_initialized || !(sbsec->flags & SBLABEL_MNT))
2632 return -EOPNOTSUPP; 2666 return -EOPNOTSUPP;
2633 2667
2634 if (name) 2668 if (name)
@@ -2830,7 +2864,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
2830 return selinux_inode_setotherxattr(dentry, name); 2864 return selinux_inode_setotherxattr(dentry, name);
2831 2865
2832 sbsec = inode->i_sb->s_security; 2866 sbsec = inode->i_sb->s_security;
2833 if (!(sbsec->flags & SE_SBLABELSUPP)) 2867 if (!(sbsec->flags & SBLABEL_MNT))
2834 return -EOPNOTSUPP; 2868 return -EOPNOTSUPP;
2835 2869
2836 if (!inode_owner_or_capable(inode)) 2870 if (!inode_owner_or_capable(inode))
@@ -3791,8 +3825,12 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
3791 u32 nlbl_sid; 3825 u32 nlbl_sid;
3792 u32 nlbl_type; 3826 u32 nlbl_type;
3793 3827
3794 selinux_skb_xfrm_sid(skb, &xfrm_sid); 3828 err = selinux_xfrm_skb_sid(skb, &xfrm_sid);
3795 selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid); 3829 if (unlikely(err))
3830 return -EACCES;
3831 err = selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
3832 if (unlikely(err))
3833 return -EACCES;
3796 3834
3797 err = security_net_peersid_resolve(nlbl_sid, nlbl_type, xfrm_sid, sid); 3835 err = security_net_peersid_resolve(nlbl_sid, nlbl_type, xfrm_sid, sid);
3798 if (unlikely(err)) { 3836 if (unlikely(err)) {
@@ -3805,6 +3843,30 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
3805 return 0; 3843 return 0;
3806} 3844}
3807 3845
3846/**
3847 * selinux_conn_sid - Determine the child socket label for a connection
3848 * @sk_sid: the parent socket's SID
3849 * @skb_sid: the packet's SID
3850 * @conn_sid: the resulting connection SID
3851 *
3852 * If @skb_sid is valid then the user:role:type information from @sk_sid is
3853 * combined with the MLS information from @skb_sid in order to create
3854 * @conn_sid. If @skb_sid is not valid then then @conn_sid is simply a copy
3855 * of @sk_sid. Returns zero on success, negative values on failure.
3856 *
3857 */
3858static int selinux_conn_sid(u32 sk_sid, u32 skb_sid, u32 *conn_sid)
3859{
3860 int err = 0;
3861
3862 if (skb_sid != SECSID_NULL)
3863 err = security_sid_mls_copy(sk_sid, skb_sid, conn_sid);
3864 else
3865 *conn_sid = sk_sid;
3866
3867 return err;
3868}
3869
3808/* socket security operations */ 3870/* socket security operations */
3809 3871
3810static int socket_sockcreate_sid(const struct task_security_struct *tsec, 3872static int socket_sockcreate_sid(const struct task_security_struct *tsec,
@@ -4246,7 +4308,7 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
4246 return selinux_sock_rcv_skb_compat(sk, skb, family); 4308 return selinux_sock_rcv_skb_compat(sk, skb, family);
4247 4309
4248 secmark_active = selinux_secmark_enabled(); 4310 secmark_active = selinux_secmark_enabled();
4249 peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled(); 4311 peerlbl_active = selinux_peerlbl_enabled();
4250 if (!secmark_active && !peerlbl_active) 4312 if (!secmark_active && !peerlbl_active)
4251 return 0; 4313 return 0;
4252 4314
@@ -4411,7 +4473,7 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
4411 struct sk_security_struct *sksec = sk->sk_security; 4473 struct sk_security_struct *sksec = sk->sk_security;
4412 int err; 4474 int err;
4413 u16 family = sk->sk_family; 4475 u16 family = sk->sk_family;
4414 u32 newsid; 4476 u32 connsid;
4415 u32 peersid; 4477 u32 peersid;
4416 4478
4417 /* handle mapped IPv4 packets arriving via IPv6 sockets */ 4479 /* handle mapped IPv4 packets arriving via IPv6 sockets */
@@ -4421,16 +4483,11 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
4421 err = selinux_skb_peerlbl_sid(skb, family, &peersid); 4483 err = selinux_skb_peerlbl_sid(skb, family, &peersid);
4422 if (err) 4484 if (err)
4423 return err; 4485 return err;
4424 if (peersid == SECSID_NULL) { 4486 err = selinux_conn_sid(sksec->sid, peersid, &connsid);
4425 req->secid = sksec->sid; 4487 if (err)
4426 req->peer_secid = SECSID_NULL; 4488 return err;
4427 } else { 4489 req->secid = connsid;
4428 err = security_sid_mls_copy(sksec->sid, peersid, &newsid); 4490 req->peer_secid = peersid;
4429 if (err)
4430 return err;
4431 req->secid = newsid;
4432 req->peer_secid = peersid;
4433 }
4434 4491
4435 return selinux_netlbl_inet_conn_request(req, family); 4492 return selinux_netlbl_inet_conn_request(req, family);
4436} 4493}
@@ -4628,7 +4685,7 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
4628 4685
4629 secmark_active = selinux_secmark_enabled(); 4686 secmark_active = selinux_secmark_enabled();
4630 netlbl_active = netlbl_enabled(); 4687 netlbl_active = netlbl_enabled();
4631 peerlbl_active = netlbl_active || selinux_xfrm_enabled(); 4688 peerlbl_active = selinux_peerlbl_enabled();
4632 if (!secmark_active && !peerlbl_active) 4689 if (!secmark_active && !peerlbl_active)
4633 return NF_ACCEPT; 4690 return NF_ACCEPT;
4634 4691
@@ -4690,6 +4747,7 @@ static unsigned int selinux_ipv6_forward(const struct nf_hook_ops *ops,
4690static unsigned int selinux_ip_output(struct sk_buff *skb, 4747static unsigned int selinux_ip_output(struct sk_buff *skb,
4691 u16 family) 4748 u16 family)
4692{ 4749{
4750 struct sock *sk;
4693 u32 sid; 4751 u32 sid;
4694 4752
4695 if (!netlbl_enabled()) 4753 if (!netlbl_enabled())
@@ -4698,8 +4756,27 @@ static unsigned int selinux_ip_output(struct sk_buff *skb,
4698 /* we do this in the LOCAL_OUT path and not the POST_ROUTING path 4756 /* we do this in the LOCAL_OUT path and not the POST_ROUTING path
4699 * because we want to make sure we apply the necessary labeling 4757 * because we want to make sure we apply the necessary labeling
4700 * before IPsec is applied so we can leverage AH protection */ 4758 * before IPsec is applied so we can leverage AH protection */
4701 if (skb->sk) { 4759 sk = skb->sk;
4702 struct sk_security_struct *sksec = skb->sk->sk_security; 4760 if (sk) {
4761 struct sk_security_struct *sksec;
4762
4763 if (sk->sk_state == TCP_LISTEN)
4764 /* if the socket is the listening state then this
4765 * packet is a SYN-ACK packet which means it needs to
4766 * be labeled based on the connection/request_sock and
4767 * not the parent socket. unfortunately, we can't
4768 * lookup the request_sock yet as it isn't queued on
4769 * the parent socket until after the SYN-ACK is sent.
4770 * the "solution" is to simply pass the packet as-is
4771 * as any IP option based labeling should be copied
4772 * from the initial connection request (in the IP
4773 * layer). it is far from ideal, but until we get a
4774 * security label in the packet itself this is the
4775 * best we can do. */
4776 return NF_ACCEPT;
4777
4778 /* standard practice, label using the parent socket */
4779 sksec = sk->sk_security;
4703 sid = sksec->sid; 4780 sid = sksec->sid;
4704 } else 4781 } else
4705 sid = SECINITSID_KERNEL; 4782 sid = SECINITSID_KERNEL;
@@ -4769,27 +4846,36 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
4769 * as fast and as clean as possible. */ 4846 * as fast and as clean as possible. */
4770 if (!selinux_policycap_netpeer) 4847 if (!selinux_policycap_netpeer)
4771 return selinux_ip_postroute_compat(skb, ifindex, family); 4848 return selinux_ip_postroute_compat(skb, ifindex, family);
4849
4850 secmark_active = selinux_secmark_enabled();
4851 peerlbl_active = selinux_peerlbl_enabled();
4852 if (!secmark_active && !peerlbl_active)
4853 return NF_ACCEPT;
4854
4855 sk = skb->sk;
4856
4772#ifdef CONFIG_XFRM 4857#ifdef CONFIG_XFRM
4773 /* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec 4858 /* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec
4774 * packet transformation so allow the packet to pass without any checks 4859 * packet transformation so allow the packet to pass without any checks
4775 * since we'll have another chance to perform access control checks 4860 * since we'll have another chance to perform access control checks
4776 * when the packet is on it's final way out. 4861 * when the packet is on it's final way out.
4777 * NOTE: there appear to be some IPv6 multicast cases where skb->dst 4862 * NOTE: there appear to be some IPv6 multicast cases where skb->dst
4778 * is NULL, in this case go ahead and apply access control. */ 4863 * is NULL, in this case go ahead and apply access control.
4779 if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL) 4864 * NOTE: if this is a local socket (skb->sk != NULL) that is in the
4865 * TCP listening state we cannot wait until the XFRM processing
4866 * is done as we will miss out on the SA label if we do;
4867 * unfortunately, this means more work, but it is only once per
4868 * connection. */
4869 if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL &&
4870 !(sk != NULL && sk->sk_state == TCP_LISTEN))
4780 return NF_ACCEPT; 4871 return NF_ACCEPT;
4781#endif 4872#endif
4782 secmark_active = selinux_secmark_enabled();
4783 peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
4784 if (!secmark_active && !peerlbl_active)
4785 return NF_ACCEPT;
4786 4873
4787 /* if the packet is being forwarded then get the peer label from the
4788 * packet itself; otherwise check to see if it is from a local
4789 * application or the kernel, if from an application get the peer label
4790 * from the sending socket, otherwise use the kernel's sid */
4791 sk = skb->sk;
4792 if (sk == NULL) { 4874 if (sk == NULL) {
4875 /* Without an associated socket the packet is either coming
4876 * from the kernel or it is being forwarded; check the packet
4877 * to determine which and if the packet is being forwarded
4878 * query the packet directly to determine the security label. */
4793 if (skb->skb_iif) { 4879 if (skb->skb_iif) {
4794 secmark_perm = PACKET__FORWARD_OUT; 4880 secmark_perm = PACKET__FORWARD_OUT;
4795 if (selinux_skb_peerlbl_sid(skb, family, &peer_sid)) 4881 if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
@@ -4798,7 +4884,45 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
4798 secmark_perm = PACKET__SEND; 4884 secmark_perm = PACKET__SEND;
4799 peer_sid = SECINITSID_KERNEL; 4885 peer_sid = SECINITSID_KERNEL;
4800 } 4886 }
4887 } else if (sk->sk_state == TCP_LISTEN) {
4888 /* Locally generated packet but the associated socket is in the
4889 * listening state which means this is a SYN-ACK packet. In
4890 * this particular case the correct security label is assigned
4891 * to the connection/request_sock but unfortunately we can't
4892 * query the request_sock as it isn't queued on the parent
4893 * socket until after the SYN-ACK packet is sent; the only
4894 * viable choice is to regenerate the label like we do in
4895 * selinux_inet_conn_request(). See also selinux_ip_output()
4896 * for similar problems. */
4897 u32 skb_sid;
4898 struct sk_security_struct *sksec = sk->sk_security;
4899 if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
4900 return NF_DROP;
4901 /* At this point, if the returned skb peerlbl is SECSID_NULL
4902 * and the packet has been through at least one XFRM
4903 * transformation then we must be dealing with the "final"
4904 * form of labeled IPsec packet; since we've already applied
4905 * all of our access controls on this packet we can safely
4906 * pass the packet. */
4907 if (skb_sid == SECSID_NULL) {
4908 switch (family) {
4909 case PF_INET:
4910 if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
4911 return NF_ACCEPT;
4912 break;
4913 case PF_INET6:
4914 if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
4915 return NF_ACCEPT;
4916 default:
4917 return NF_DROP_ERR(-ECONNREFUSED);
4918 }
4919 }
4920 if (selinux_conn_sid(sksec->sid, skb_sid, &peer_sid))
4921 return NF_DROP;
4922 secmark_perm = PACKET__SEND;
4801 } else { 4923 } else {
4924 /* Locally generated packet, fetch the security label from the
4925 * associated socket. */
4802 struct sk_security_struct *sksec = sk->sk_security; 4926 struct sk_security_struct *sksec = sk->sk_security;
4803 peer_sid = sksec->sid; 4927 peer_sid = sksec->sid;
4804 secmark_perm = PACKET__SEND; 4928 secmark_perm = PACKET__SEND;
@@ -5784,7 +5908,8 @@ static struct security_operations selinux_ops = {
5784 .xfrm_policy_clone_security = selinux_xfrm_policy_clone, 5908 .xfrm_policy_clone_security = selinux_xfrm_policy_clone,
5785 .xfrm_policy_free_security = selinux_xfrm_policy_free, 5909 .xfrm_policy_free_security = selinux_xfrm_policy_free,
5786 .xfrm_policy_delete_security = selinux_xfrm_policy_delete, 5910 .xfrm_policy_delete_security = selinux_xfrm_policy_delete,
5787 .xfrm_state_alloc_security = selinux_xfrm_state_alloc, 5911 .xfrm_state_alloc = selinux_xfrm_state_alloc,
5912 .xfrm_state_alloc_acquire = selinux_xfrm_state_alloc_acquire,
5788 .xfrm_state_free_security = selinux_xfrm_state_free, 5913 .xfrm_state_free_security = selinux_xfrm_state_free,
5789 .xfrm_state_delete_security = selinux_xfrm_state_delete, 5914 .xfrm_state_delete_security = selinux_xfrm_state_delete,
5790 .xfrm_policy_lookup = selinux_xfrm_policy_lookup, 5915 .xfrm_policy_lookup = selinux_xfrm_policy_lookup,
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index aa47bcabb5f6..b1dfe1049450 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -58,8 +58,8 @@ struct superblock_security_struct {
58 u32 sid; /* SID of file system superblock */ 58 u32 sid; /* SID of file system superblock */
59 u32 def_sid; /* default SID for labeling */ 59 u32 def_sid; /* default SID for labeling */
60 u32 mntpoint_sid; /* SECURITY_FS_USE_MNTPOINT context for files */ 60 u32 mntpoint_sid; /* SECURITY_FS_USE_MNTPOINT context for files */
61 unsigned int behavior; /* labeling behavior */ 61 unsigned short behavior; /* labeling behavior */
62 unsigned char flags; /* which mount options were specified */ 62 unsigned short flags; /* which mount options were specified */
63 struct mutex lock; 63 struct mutex lock;
64 struct list_head isec_head; 64 struct list_head isec_head;
65 spinlock_t isec_lock; 65 spinlock_t isec_lock;
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 8fd8e18ea340..fe341ae37004 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -45,14 +45,15 @@
45/* Mask for just the mount related flags */ 45/* Mask for just the mount related flags */
46#define SE_MNTMASK 0x0f 46#define SE_MNTMASK 0x0f
47/* Super block security struct flags for mount options */ 47/* Super block security struct flags for mount options */
48/* BE CAREFUL, these need to be the low order bits for selinux_get_mnt_opts */
48#define CONTEXT_MNT 0x01 49#define CONTEXT_MNT 0x01
49#define FSCONTEXT_MNT 0x02 50#define FSCONTEXT_MNT 0x02
50#define ROOTCONTEXT_MNT 0x04 51#define ROOTCONTEXT_MNT 0x04
51#define DEFCONTEXT_MNT 0x08 52#define DEFCONTEXT_MNT 0x08
53#define SBLABEL_MNT 0x10
52/* Non-mount related flags */ 54/* Non-mount related flags */
53#define SE_SBINITIALIZED 0x10 55#define SE_SBINITIALIZED 0x0100
54#define SE_SBPROC 0x20 56#define SE_SBPROC 0x0200
55#define SE_SBLABELSUPP 0x40
56 57
57#define CONTEXT_STR "context=" 58#define CONTEXT_STR "context="
58#define FSCONTEXT_STR "fscontext=" 59#define FSCONTEXT_STR "fscontext="
@@ -68,12 +69,15 @@ extern int selinux_enabled;
68enum { 69enum {
69 POLICYDB_CAPABILITY_NETPEER, 70 POLICYDB_CAPABILITY_NETPEER,
70 POLICYDB_CAPABILITY_OPENPERM, 71 POLICYDB_CAPABILITY_OPENPERM,
72 POLICYDB_CAPABILITY_REDHAT1,
73 POLICYDB_CAPABILITY_ALWAYSNETWORK,
71 __POLICYDB_CAPABILITY_MAX 74 __POLICYDB_CAPABILITY_MAX
72}; 75};
73#define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1) 76#define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1)
74 77
75extern int selinux_policycap_netpeer; 78extern int selinux_policycap_netpeer;
76extern int selinux_policycap_openperm; 79extern int selinux_policycap_openperm;
80extern int selinux_policycap_alwaysnetwork;
77 81
78/* 82/*
79 * type_datum properties 83 * type_datum properties
@@ -172,8 +176,7 @@ int security_get_allow_unknown(void);
172#define SECURITY_FS_USE_NATIVE 7 /* use native label support */ 176#define SECURITY_FS_USE_NATIVE 7 /* use native label support */
173#define SECURITY_FS_USE_MAX 7 /* Highest SECURITY_FS_USE_XXX */ 177#define SECURITY_FS_USE_MAX 7 /* Highest SECURITY_FS_USE_XXX */
174 178
175int security_fs_use(const char *fstype, unsigned int *behavior, 179int security_fs_use(struct super_block *sb);
176 u32 *sid);
177 180
178int security_genfs_sid(const char *fstype, char *name, u16 sclass, 181int security_genfs_sid(const char *fstype, char *name, u16 sclass,
179 u32 *sid); 182 u32 *sid);
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index 6713f04e30ba..48c3cc94c168 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -10,29 +10,21 @@
10#include <net/flow.h> 10#include <net/flow.h>
11 11
12int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, 12int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
13 struct xfrm_user_sec_ctx *sec_ctx); 13 struct xfrm_user_sec_ctx *uctx);
14int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, 14int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
15 struct xfrm_sec_ctx **new_ctxp); 15 struct xfrm_sec_ctx **new_ctxp);
16void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx); 16void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
17int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx); 17int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx);
18int selinux_xfrm_state_alloc(struct xfrm_state *x, 18int selinux_xfrm_state_alloc(struct xfrm_state *x,
19 struct xfrm_user_sec_ctx *sec_ctx, u32 secid); 19 struct xfrm_user_sec_ctx *uctx);
20int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
21 struct xfrm_sec_ctx *polsec, u32 secid);
20void selinux_xfrm_state_free(struct xfrm_state *x); 22void selinux_xfrm_state_free(struct xfrm_state *x);
21int selinux_xfrm_state_delete(struct xfrm_state *x); 23int selinux_xfrm_state_delete(struct xfrm_state *x);
22int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); 24int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
23int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, 25int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
24 struct xfrm_policy *xp, const struct flowi *fl); 26 struct xfrm_policy *xp,
25 27 const struct flowi *fl);
26/*
27 * Extract the security blob from the sock (it's actually on the socket)
28 */
29static inline struct inode_security_struct *get_sock_isec(struct sock *sk)
30{
31 if (!sk->sk_socket)
32 return NULL;
33
34 return SOCK_INODE(sk->sk_socket)->i_security;
35}
36 28
37#ifdef CONFIG_SECURITY_NETWORK_XFRM 29#ifdef CONFIG_SECURITY_NETWORK_XFRM
38extern atomic_t selinux_xfrm_refcount; 30extern atomic_t selinux_xfrm_refcount;
@@ -42,11 +34,12 @@ static inline int selinux_xfrm_enabled(void)
42 return (atomic_read(&selinux_xfrm_refcount) > 0); 34 return (atomic_read(&selinux_xfrm_refcount) > 0);
43} 35}
44 36
45int selinux_xfrm_sock_rcv_skb(u32 sid, struct sk_buff *skb, 37int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
46 struct common_audit_data *ad); 38 struct common_audit_data *ad);
47int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb, 39int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
48 struct common_audit_data *ad, u8 proto); 40 struct common_audit_data *ad, u8 proto);
49int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall); 41int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
42int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid);
50 43
51static inline void selinux_xfrm_notify_policyload(void) 44static inline void selinux_xfrm_notify_policyload(void)
52{ 45{
@@ -64,19 +57,21 @@ static inline int selinux_xfrm_enabled(void)
64 return 0; 57 return 0;
65} 58}
66 59
67static inline int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb, 60static inline int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
68 struct common_audit_data *ad) 61 struct common_audit_data *ad)
69{ 62{
70 return 0; 63 return 0;
71} 64}
72 65
73static inline int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb, 66static inline int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
74 struct common_audit_data *ad, u8 proto) 67 struct common_audit_data *ad,
68 u8 proto)
75{ 69{
76 return 0; 70 return 0;
77} 71}
78 72
79static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall) 73static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid,
74 int ckall)
80{ 75{
81 *sid = SECSID_NULL; 76 *sid = SECSID_NULL;
82 return 0; 77 return 0;
@@ -85,12 +80,12 @@ static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int
85static inline void selinux_xfrm_notify_policyload(void) 80static inline void selinux_xfrm_notify_policyload(void)
86{ 81{
87} 82}
88#endif
89 83
90static inline void selinux_skb_xfrm_sid(struct sk_buff *skb, u32 *sid) 84static inline int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
91{ 85{
92 int err = selinux_xfrm_decode_session(skb, sid, 0); 86 *sid = SECSID_NULL;
93 BUG_ON(err); 87 return 0;
94} 88}
89#endif
95 90
96#endif /* _SELINUX_XFRM_H_ */ 91#endif /* _SELINUX_XFRM_H_ */
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
index da4b8b233280..6235d052338b 100644
--- a/security/selinux/netlabel.c
+++ b/security/selinux/netlabel.c
@@ -442,8 +442,7 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
442 sksec->nlbl_state != NLBL_CONNLABELED) 442 sksec->nlbl_state != NLBL_CONNLABELED)
443 return 0; 443 return 0;
444 444
445 local_bh_disable(); 445 lock_sock(sk);
446 bh_lock_sock_nested(sk);
447 446
448 /* connected sockets are allowed to disconnect when the address family 447 /* connected sockets are allowed to disconnect when the address family
449 * is set to AF_UNSPEC, if that is what is happening we want to reset 448 * is set to AF_UNSPEC, if that is what is happening we want to reset
@@ -464,7 +463,6 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
464 sksec->nlbl_state = NLBL_CONNLABELED; 463 sksec->nlbl_state = NLBL_CONNLABELED;
465 464
466socket_connect_return: 465socket_connect_return:
467 bh_unlock_sock(sk); 466 release_sock(sk);
468 local_bh_enable();
469 return rc; 467 return rc;
470} 468}
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
index c5454c0477c3..03a72c32afd7 100644
--- a/security/selinux/netnode.c
+++ b/security/selinux/netnode.c
@@ -166,6 +166,7 @@ static void sel_netnode_insert(struct sel_netnode *node)
166 break; 166 break;
167 default: 167 default:
168 BUG(); 168 BUG();
169 return;
169 } 170 }
170 171
171 /* we need to impose a limit on the growth of the hash table so check 172 /* we need to impose a limit on the growth of the hash table so check
@@ -225,6 +226,7 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
225 break; 226 break;
226 default: 227 default:
227 BUG(); 228 BUG();
229 ret = -EINVAL;
228 } 230 }
229 if (ret != 0) 231 if (ret != 0)
230 goto out; 232 goto out;
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 855e464e92ef..332ac8a80cf5 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -116,6 +116,8 @@ static struct nlmsg_perm nlmsg_audit_perms[] =
116 { AUDIT_MAKE_EQUIV, NETLINK_AUDIT_SOCKET__NLMSG_WRITE }, 116 { AUDIT_MAKE_EQUIV, NETLINK_AUDIT_SOCKET__NLMSG_WRITE },
117 { AUDIT_TTY_GET, NETLINK_AUDIT_SOCKET__NLMSG_READ }, 117 { AUDIT_TTY_GET, NETLINK_AUDIT_SOCKET__NLMSG_READ },
118 { AUDIT_TTY_SET, NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT }, 118 { AUDIT_TTY_SET, NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT },
119 { AUDIT_GET_FEATURE, NETLINK_AUDIT_SOCKET__NLMSG_READ },
120 { AUDIT_SET_FEATURE, NETLINK_AUDIT_SOCKET__NLMSG_WRITE },
119}; 121};
120 122
121 123
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index ff427733c290..5122affe06a8 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -44,7 +44,9 @@
44/* Policy capability filenames */ 44/* Policy capability filenames */
45static char *policycap_names[] = { 45static char *policycap_names[] = {
46 "network_peer_controls", 46 "network_peer_controls",
47 "open_perms" 47 "open_perms",
48 "redhat1",
49 "always_check_network"
48}; 50};
49 51
50unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE; 52unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index 30f119b1d1ec..820313a04d49 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -213,7 +213,12 @@ netlbl_import_failure:
213} 213}
214#endif /* CONFIG_NETLABEL */ 214#endif /* CONFIG_NETLABEL */
215 215
216int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2) 216/*
217 * Check to see if all the bits set in e2 are also set in e1. Optionally,
218 * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed
219 * last_e2bit.
220 */
221int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit)
217{ 222{
218 struct ebitmap_node *n1, *n2; 223 struct ebitmap_node *n1, *n2;
219 int i; 224 int i;
@@ -223,14 +228,25 @@ int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
223 228
224 n1 = e1->node; 229 n1 = e1->node;
225 n2 = e2->node; 230 n2 = e2->node;
231
226 while (n1 && n2 && (n1->startbit <= n2->startbit)) { 232 while (n1 && n2 && (n1->startbit <= n2->startbit)) {
227 if (n1->startbit < n2->startbit) { 233 if (n1->startbit < n2->startbit) {
228 n1 = n1->next; 234 n1 = n1->next;
229 continue; 235 continue;
230 } 236 }
231 for (i = 0; i < EBITMAP_UNIT_NUMS; i++) { 237 for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i]; )
238 i--; /* Skip trailing NULL map entries */
239 if (last_e2bit && (i >= 0)) {
240 u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE +
241 __fls(n2->maps[i]);
242 if (lastsetbit > last_e2bit)
243 return 0;
244 }
245
246 while (i >= 0) {
232 if ((n1->maps[i] & n2->maps[i]) != n2->maps[i]) 247 if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
233 return 0; 248 return 0;
249 i--;
234 } 250 }
235 251
236 n1 = n1->next; 252 n1 = n1->next;
diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
index 922f8afa89dd..712c8a7b8e8b 100644
--- a/security/selinux/ss/ebitmap.h
+++ b/security/selinux/ss/ebitmap.h
@@ -16,7 +16,13 @@
16 16
17#include <net/netlabel.h> 17#include <net/netlabel.h>
18 18
19#define EBITMAP_UNIT_NUMS ((32 - sizeof(void *) - sizeof(u32)) \ 19#ifdef CONFIG_64BIT
20#define EBITMAP_NODE_SIZE 64
21#else
22#define EBITMAP_NODE_SIZE 32
23#endif
24
25#define EBITMAP_UNIT_NUMS ((EBITMAP_NODE_SIZE-sizeof(void *)-sizeof(u32))\
20 / sizeof(unsigned long)) 26 / sizeof(unsigned long))
21#define EBITMAP_UNIT_SIZE BITS_PER_LONG 27#define EBITMAP_UNIT_SIZE BITS_PER_LONG
22#define EBITMAP_SIZE (EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE) 28#define EBITMAP_SIZE (EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE)
@@ -117,7 +123,7 @@ static inline void ebitmap_node_clr_bit(struct ebitmap_node *n,
117 123
118int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2); 124int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2);
119int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src); 125int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src);
120int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2); 126int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit);
121int ebitmap_get_bit(struct ebitmap *e, unsigned long bit); 127int ebitmap_get_bit(struct ebitmap *e, unsigned long bit);
122int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value); 128int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
123void ebitmap_destroy(struct ebitmap *e); 129void ebitmap_destroy(struct ebitmap *e);
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index 40de8d3f208e..c85bc1ec040c 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -160,8 +160,6 @@ void mls_sid_to_context(struct context *context,
160int mls_level_isvalid(struct policydb *p, struct mls_level *l) 160int mls_level_isvalid(struct policydb *p, struct mls_level *l)
161{ 161{
162 struct level_datum *levdatum; 162 struct level_datum *levdatum;
163 struct ebitmap_node *node;
164 int i;
165 163
166 if (!l->sens || l->sens > p->p_levels.nprim) 164 if (!l->sens || l->sens > p->p_levels.nprim)
167 return 0; 165 return 0;
@@ -170,19 +168,13 @@ int mls_level_isvalid(struct policydb *p, struct mls_level *l)
170 if (!levdatum) 168 if (!levdatum)
171 return 0; 169 return 0;
172 170
173 ebitmap_for_each_positive_bit(&l->cat, node, i) { 171 /*
174 if (i > p->p_cats.nprim) 172 * Return 1 iff all the bits set in l->cat are also be set in
175 return 0; 173 * levdatum->level->cat and no bit in l->cat is larger than
176 if (!ebitmap_get_bit(&levdatum->level->cat, i)) { 174 * p->p_cats.nprim.
177 /* 175 */
178 * Category may not be associated with 176 return ebitmap_contains(&levdatum->level->cat, &l->cat,
179 * sensitivity. 177 p->p_cats.nprim);
180 */
181 return 0;
182 }
183 }
184
185 return 1;
186} 178}
187 179
188int mls_range_isvalid(struct policydb *p, struct mls_range *r) 180int mls_range_isvalid(struct policydb *p, struct mls_range *r)
diff --git a/security/selinux/ss/mls_types.h b/security/selinux/ss/mls_types.h
index 03bed52a8052..e93648774137 100644
--- a/security/selinux/ss/mls_types.h
+++ b/security/selinux/ss/mls_types.h
@@ -35,7 +35,7 @@ static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2)
35static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2) 35static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2)
36{ 36{
37 return ((l1->sens >= l2->sens) && 37 return ((l1->sens >= l2->sens) &&
38 ebitmap_contains(&l1->cat, &l2->cat)); 38 ebitmap_contains(&l1->cat, &l2->cat, 0));
39} 39}
40 40
41#define mls_level_incomp(l1, l2) \ 41#define mls_level_incomp(l1, l2) \
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index c8adde3aff8f..f6195ebde3c9 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -3203,9 +3203,8 @@ static int range_write_helper(void *key, void *data, void *ptr)
3203 3203
3204static int range_write(struct policydb *p, void *fp) 3204static int range_write(struct policydb *p, void *fp)
3205{ 3205{
3206 size_t nel;
3207 __le32 buf[1]; 3206 __le32 buf[1];
3208 int rc; 3207 int rc, nel;
3209 struct policy_data pd; 3208 struct policy_data pd;
3210 3209
3211 pd.p = p; 3210 pd.p = p;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index b4feecc3fe01..d106733ad987 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -72,6 +72,7 @@
72 72
73int selinux_policycap_netpeer; 73int selinux_policycap_netpeer;
74int selinux_policycap_openperm; 74int selinux_policycap_openperm;
75int selinux_policycap_alwaysnetwork;
75 76
76static DEFINE_RWLOCK(policy_rwlock); 77static DEFINE_RWLOCK(policy_rwlock);
77 78
@@ -1812,6 +1813,8 @@ static void security_load_policycaps(void)
1812 POLICYDB_CAPABILITY_NETPEER); 1813 POLICYDB_CAPABILITY_NETPEER);
1813 selinux_policycap_openperm = ebitmap_get_bit(&policydb.policycaps, 1814 selinux_policycap_openperm = ebitmap_get_bit(&policydb.policycaps,
1814 POLICYDB_CAPABILITY_OPENPERM); 1815 POLICYDB_CAPABILITY_OPENPERM);
1816 selinux_policycap_alwaysnetwork = ebitmap_get_bit(&policydb.policycaps,
1817 POLICYDB_CAPABILITY_ALWAYSNETWORK);
1815} 1818}
1816 1819
1817static int security_preserve_bools(struct policydb *p); 1820static int security_preserve_bools(struct policydb *p);
@@ -2323,17 +2326,14 @@ out:
2323 2326
2324/** 2327/**
2325 * security_fs_use - Determine how to handle labeling for a filesystem. 2328 * security_fs_use - Determine how to handle labeling for a filesystem.
2326 * @fstype: filesystem type 2329 * @sb: superblock in question
2327 * @behavior: labeling behavior
2328 * @sid: SID for filesystem (superblock)
2329 */ 2330 */
2330int security_fs_use( 2331int security_fs_use(struct super_block *sb)
2331 const char *fstype,
2332 unsigned int *behavior,
2333 u32 *sid)
2334{ 2332{
2335 int rc = 0; 2333 int rc = 0;
2336 struct ocontext *c; 2334 struct ocontext *c;
2335 struct superblock_security_struct *sbsec = sb->s_security;
2336 const char *fstype = sb->s_type->name;
2337 2337
2338 read_lock(&policy_rwlock); 2338 read_lock(&policy_rwlock);
2339 2339
@@ -2345,21 +2345,21 @@ int security_fs_use(
2345 } 2345 }
2346 2346
2347 if (c) { 2347 if (c) {
2348 *behavior = c->v.behavior; 2348 sbsec->behavior = c->v.behavior;
2349 if (!c->sid[0]) { 2349 if (!c->sid[0]) {
2350 rc = sidtab_context_to_sid(&sidtab, &c->context[0], 2350 rc = sidtab_context_to_sid(&sidtab, &c->context[0],
2351 &c->sid[0]); 2351 &c->sid[0]);
2352 if (rc) 2352 if (rc)
2353 goto out; 2353 goto out;
2354 } 2354 }
2355 *sid = c->sid[0]; 2355 sbsec->sid = c->sid[0];
2356 } else { 2356 } else {
2357 rc = security_genfs_sid(fstype, "/", SECCLASS_DIR, sid); 2357 rc = security_genfs_sid(fstype, "/", SECCLASS_DIR, &sbsec->sid);
2358 if (rc) { 2358 if (rc) {
2359 *behavior = SECURITY_FS_USE_NONE; 2359 sbsec->behavior = SECURITY_FS_USE_NONE;
2360 rc = 0; 2360 rc = 0;
2361 } else { 2361 } else {
2362 *behavior = SECURITY_FS_USE_GENFS; 2362 sbsec->behavior = SECURITY_FS_USE_GENFS;
2363 } 2363 }
2364 } 2364 }
2365 2365
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index d03081886214..0462cb3ff0a7 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -56,7 +56,7 @@
56atomic_t selinux_xfrm_refcount = ATOMIC_INIT(0); 56atomic_t selinux_xfrm_refcount = ATOMIC_INIT(0);
57 57
58/* 58/*
59 * Returns true if an LSM/SELinux context 59 * Returns true if the context is an LSM/SELinux context.
60 */ 60 */
61static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx) 61static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx)
62{ 62{
@@ -66,7 +66,7 @@ static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx)
66} 66}
67 67
68/* 68/*
69 * Returns true if the xfrm contains a security blob for SELinux 69 * Returns true if the xfrm contains a security blob for SELinux.
70 */ 70 */
71static inline int selinux_authorizable_xfrm(struct xfrm_state *x) 71static inline int selinux_authorizable_xfrm(struct xfrm_state *x)
72{ 72{
@@ -74,48 +74,111 @@ static inline int selinux_authorizable_xfrm(struct xfrm_state *x)
74} 74}
75 75
76/* 76/*
77 * LSM hook implementation that authorizes that a flow can use 77 * Allocates a xfrm_sec_state and populates it using the supplied security
78 * a xfrm policy rule. 78 * xfrm_user_sec_ctx context.
79 */ 79 */
80int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir) 80static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
81 struct xfrm_user_sec_ctx *uctx)
81{ 82{
82 int rc; 83 int rc;
83 u32 sel_sid; 84 const struct task_security_struct *tsec = current_security();
85 struct xfrm_sec_ctx *ctx = NULL;
86 u32 str_len;
84 87
85 /* Context sid is either set to label or ANY_ASSOC */ 88 if (ctxp == NULL || uctx == NULL ||
86 if (ctx) { 89 uctx->ctx_doi != XFRM_SC_DOI_LSM ||
87 if (!selinux_authorizable_ctx(ctx)) 90 uctx->ctx_alg != XFRM_SC_ALG_SELINUX)
88 return -EINVAL; 91 return -EINVAL;
89 92
90 sel_sid = ctx->ctx_sid; 93 str_len = uctx->ctx_len;
91 } else 94 if (str_len >= PAGE_SIZE)
92 /* 95 return -ENOMEM;
93 * All flows should be treated as polmatch'ing an 96
94 * otherwise applicable "non-labeled" policy. This 97 ctx = kmalloc(sizeof(*ctx) + str_len + 1, GFP_KERNEL);
95 * would prevent inadvertent "leaks". 98 if (!ctx)
96 */ 99 return -ENOMEM;
97 return 0;
98 100
99 rc = avc_has_perm(fl_secid, sel_sid, SECCLASS_ASSOCIATION, 101 ctx->ctx_doi = XFRM_SC_DOI_LSM;
100 ASSOCIATION__POLMATCH, 102 ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
101 NULL); 103 ctx->ctx_len = str_len;
104 memcpy(ctx->ctx_str, &uctx[1], str_len);
105 ctx->ctx_str[str_len] = '\0';
106 rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid);
107 if (rc)
108 goto err;
102 109
103 if (rc == -EACCES) 110 rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
104 return -ESRCH; 111 SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, NULL);
112 if (rc)
113 goto err;
105 114
115 *ctxp = ctx;
116 atomic_inc(&selinux_xfrm_refcount);
117 return 0;
118
119err:
120 kfree(ctx);
106 return rc; 121 return rc;
107} 122}
108 123
109/* 124/*
125 * Free the xfrm_sec_ctx structure.
126 */
127static void selinux_xfrm_free(struct xfrm_sec_ctx *ctx)
128{
129 if (!ctx)
130 return;
131
132 atomic_dec(&selinux_xfrm_refcount);
133 kfree(ctx);
134}
135
136/*
137 * Authorize the deletion of a labeled SA or policy rule.
138 */
139static int selinux_xfrm_delete(struct xfrm_sec_ctx *ctx)
140{
141 const struct task_security_struct *tsec = current_security();
142
143 if (!ctx)
144 return 0;
145
146 return avc_has_perm(tsec->sid, ctx->ctx_sid,
147 SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
148 NULL);
149}
150
151/*
152 * LSM hook implementation that authorizes that a flow can use a xfrm policy
153 * rule.
154 */
155int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
156{
157 int rc;
158
159 /* All flows should be treated as polmatch'ing an otherwise applicable
160 * "non-labeled" policy. This would prevent inadvertent "leaks". */
161 if (!ctx)
162 return 0;
163
164 /* Context sid is either set to label or ANY_ASSOC */
165 if (!selinux_authorizable_ctx(ctx))
166 return -EINVAL;
167
168 rc = avc_has_perm(fl_secid, ctx->ctx_sid,
169 SECCLASS_ASSOCIATION, ASSOCIATION__POLMATCH, NULL);
170 return (rc == -EACCES ? -ESRCH : rc);
171}
172
173/*
110 * LSM hook implementation that authorizes that a state matches 174 * LSM hook implementation that authorizes that a state matches
111 * the given policy, flow combo. 175 * the given policy, flow combo.
112 */ 176 */
113 177int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
114int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp, 178 struct xfrm_policy *xp,
115 const struct flowi *fl) 179 const struct flowi *fl)
116{ 180{
117 u32 state_sid; 181 u32 state_sid;
118 int rc;
119 182
120 if (!xp->security) 183 if (!xp->security)
121 if (x->security) 184 if (x->security)
@@ -138,187 +201,111 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *
138 if (fl->flowi_secid != state_sid) 201 if (fl->flowi_secid != state_sid)
139 return 0; 202 return 0;
140 203
141 rc = avc_has_perm(fl->flowi_secid, state_sid, SECCLASS_ASSOCIATION, 204 /* We don't need a separate SA Vs. policy polmatch check since the SA
142 ASSOCIATION__SENDTO, 205 * is now of the same label as the flow and a flow Vs. policy polmatch
143 NULL)? 0:1; 206 * check had already happened in selinux_xfrm_policy_lookup() above. */
144 207 return (avc_has_perm(fl->flowi_secid, state_sid,
145 /* 208 SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO,
146 * We don't need a separate SA Vs. policy polmatch check 209 NULL) ? 0 : 1);
147 * since the SA is now of the same label as the flow and
148 * a flow Vs. policy polmatch check had already happened
149 * in selinux_xfrm_policy_lookup() above.
150 */
151
152 return rc;
153} 210}
154 211
155/* 212static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb)
156 * LSM hook implementation that checks and/or returns the xfrm sid for the
157 * incoming packet.
158 */
159
160int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
161{ 213{
162 struct sec_path *sp; 214 struct dst_entry *dst = skb_dst(skb);
215 struct xfrm_state *x;
163 216
164 *sid = SECSID_NULL; 217 if (dst == NULL)
218 return SECSID_NULL;
219 x = dst->xfrm;
220 if (x == NULL || !selinux_authorizable_xfrm(x))
221 return SECSID_NULL;
165 222
166 if (skb == NULL) 223 return x->security->ctx_sid;
167 return 0; 224}
225
226static int selinux_xfrm_skb_sid_ingress(struct sk_buff *skb,
227 u32 *sid, int ckall)
228{
229 u32 sid_session = SECSID_NULL;
230 struct sec_path *sp = skb->sp;
168 231
169 sp = skb->sp;
170 if (sp) { 232 if (sp) {
171 int i, sid_set = 0; 233 int i;
172 234
173 for (i = sp->len-1; i >= 0; i--) { 235 for (i = sp->len - 1; i >= 0; i--) {
174 struct xfrm_state *x = sp->xvec[i]; 236 struct xfrm_state *x = sp->xvec[i];
175 if (selinux_authorizable_xfrm(x)) { 237 if (selinux_authorizable_xfrm(x)) {
176 struct xfrm_sec_ctx *ctx = x->security; 238 struct xfrm_sec_ctx *ctx = x->security;
177 239
178 if (!sid_set) { 240 if (sid_session == SECSID_NULL) {
179 *sid = ctx->ctx_sid; 241 sid_session = ctx->ctx_sid;
180 sid_set = 1;
181
182 if (!ckall) 242 if (!ckall)
183 break; 243 goto out;
184 } else if (*sid != ctx->ctx_sid) 244 } else if (sid_session != ctx->ctx_sid) {
245 *sid = SECSID_NULL;
185 return -EINVAL; 246 return -EINVAL;
247 }
186 } 248 }
187 } 249 }
188 } 250 }
189 251
252out:
253 *sid = sid_session;
190 return 0; 254 return 0;
191} 255}
192 256
193/* 257/*
194 * Security blob allocation for xfrm_policy and xfrm_state 258 * LSM hook implementation that checks and/or returns the xfrm sid for the
195 * CTX does not have a meaningful value on input 259 * incoming packet.
196 */ 260 */
197static int selinux_xfrm_sec_ctx_alloc(struct xfrm_sec_ctx **ctxp, 261int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
198 struct xfrm_user_sec_ctx *uctx, u32 sid)
199{ 262{
200 int rc = 0; 263 if (skb == NULL) {
201 const struct task_security_struct *tsec = current_security(); 264 *sid = SECSID_NULL;
202 struct xfrm_sec_ctx *ctx = NULL; 265 return 0;
203 char *ctx_str = NULL;
204 u32 str_len;
205
206 BUG_ON(uctx && sid);
207
208 if (!uctx)
209 goto not_from_user;
210
211 if (uctx->ctx_alg != XFRM_SC_ALG_SELINUX)
212 return -EINVAL;
213
214 str_len = uctx->ctx_len;
215 if (str_len >= PAGE_SIZE)
216 return -ENOMEM;
217
218 *ctxp = ctx = kmalloc(sizeof(*ctx) +
219 str_len + 1,
220 GFP_KERNEL);
221
222 if (!ctx)
223 return -ENOMEM;
224
225 ctx->ctx_doi = uctx->ctx_doi;
226 ctx->ctx_len = str_len;
227 ctx->ctx_alg = uctx->ctx_alg;
228
229 memcpy(ctx->ctx_str,
230 uctx+1,
231 str_len);
232 ctx->ctx_str[str_len] = 0;
233 rc = security_context_to_sid(ctx->ctx_str,
234 str_len,
235 &ctx->ctx_sid);
236
237 if (rc)
238 goto out;
239
240 /*
241 * Does the subject have permission to set security context?
242 */
243 rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
244 SECCLASS_ASSOCIATION,
245 ASSOCIATION__SETCONTEXT, NULL);
246 if (rc)
247 goto out;
248
249 return rc;
250
251not_from_user:
252 rc = security_sid_to_context(sid, &ctx_str, &str_len);
253 if (rc)
254 goto out;
255
256 *ctxp = ctx = kmalloc(sizeof(*ctx) +
257 str_len,
258 GFP_ATOMIC);
259
260 if (!ctx) {
261 rc = -ENOMEM;
262 goto out;
263 } 266 }
267 return selinux_xfrm_skb_sid_ingress(skb, sid, ckall);
268}
264 269
265 ctx->ctx_doi = XFRM_SC_DOI_LSM; 270int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
266 ctx->ctx_alg = XFRM_SC_ALG_SELINUX; 271{
267 ctx->ctx_sid = sid; 272 int rc;
268 ctx->ctx_len = str_len;
269 memcpy(ctx->ctx_str,
270 ctx_str,
271 str_len);
272 273
273 goto out2; 274 rc = selinux_xfrm_skb_sid_ingress(skb, sid, 0);
275 if (rc == 0 && *sid == SECSID_NULL)
276 *sid = selinux_xfrm_skb_sid_egress(skb);
274 277
275out:
276 *ctxp = NULL;
277 kfree(ctx);
278out2:
279 kfree(ctx_str);
280 return rc; 278 return rc;
281} 279}
282 280
283/* 281/*
284 * LSM hook implementation that allocs and transfers uctx spec to 282 * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy.
285 * xfrm_policy.
286 */ 283 */
287int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, 284int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
288 struct xfrm_user_sec_ctx *uctx) 285 struct xfrm_user_sec_ctx *uctx)
289{ 286{
290 int err; 287 return selinux_xfrm_alloc_user(ctxp, uctx);
291
292 BUG_ON(!uctx);
293
294 err = selinux_xfrm_sec_ctx_alloc(ctxp, uctx, 0);
295 if (err == 0)
296 atomic_inc(&selinux_xfrm_refcount);
297
298 return err;
299} 288}
300 289
301
302/* 290/*
303 * LSM hook implementation that copies security data structure from old to 291 * LSM hook implementation that copies security data structure from old to new
304 * new for policy cloning. 292 * for policy cloning.
305 */ 293 */
306int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, 294int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
307 struct xfrm_sec_ctx **new_ctxp) 295 struct xfrm_sec_ctx **new_ctxp)
308{ 296{
309 struct xfrm_sec_ctx *new_ctx; 297 struct xfrm_sec_ctx *new_ctx;
310 298
311 if (old_ctx) { 299 if (!old_ctx)
312 new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len, 300 return 0;
313 GFP_ATOMIC); 301
314 if (!new_ctx) 302 new_ctx = kmemdup(old_ctx, sizeof(*old_ctx) + old_ctx->ctx_len,
315 return -ENOMEM; 303 GFP_ATOMIC);
304 if (!new_ctx)
305 return -ENOMEM;
306 atomic_inc(&selinux_xfrm_refcount);
307 *new_ctxp = new_ctx;
316 308
317 memcpy(new_ctx, old_ctx, sizeof(*new_ctx));
318 memcpy(new_ctx->ctx_str, old_ctx->ctx_str, new_ctx->ctx_len);
319 atomic_inc(&selinux_xfrm_refcount);
320 *new_ctxp = new_ctx;
321 }
322 return 0; 309 return 0;
323} 310}
324 311
@@ -327,8 +314,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
327 */ 314 */
328void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx) 315void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
329{ 316{
330 atomic_dec(&selinux_xfrm_refcount); 317 selinux_xfrm_free(ctx);
331 kfree(ctx);
332} 318}
333 319
334/* 320/*
@@ -336,31 +322,58 @@ void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
336 */ 322 */
337int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx) 323int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
338{ 324{
339 const struct task_security_struct *tsec = current_security(); 325 return selinux_xfrm_delete(ctx);
340 326}
341 if (!ctx)
342 return 0;
343 327
344 return avc_has_perm(tsec->sid, ctx->ctx_sid, 328/*
345 SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, 329 * LSM hook implementation that allocates a xfrm_sec_state, populates it using
346 NULL); 330 * the supplied security context, and assigns it to the xfrm_state.
331 */
332int selinux_xfrm_state_alloc(struct xfrm_state *x,
333 struct xfrm_user_sec_ctx *uctx)
334{
335 return selinux_xfrm_alloc_user(&x->security, uctx);
347} 336}
348 337
349/* 338/*
350 * LSM hook implementation that allocs and transfers sec_ctx spec to 339 * LSM hook implementation that allocates a xfrm_sec_state and populates based
351 * xfrm_state. 340 * on a secid.
352 */ 341 */
353int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uctx, 342int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
354 u32 secid) 343 struct xfrm_sec_ctx *polsec, u32 secid)
355{ 344{
356 int err; 345 int rc;
346 struct xfrm_sec_ctx *ctx;
347 char *ctx_str = NULL;
348 int str_len;
349
350 if (!polsec)
351 return 0;
352
353 if (secid == 0)
354 return -EINVAL;
355
356 rc = security_sid_to_context(secid, &ctx_str, &str_len);
357 if (rc)
358 return rc;
357 359
358 BUG_ON(!x); 360 ctx = kmalloc(sizeof(*ctx) + str_len, GFP_ATOMIC);
361 if (!ctx) {
362 rc = -ENOMEM;
363 goto out;
364 }
359 365
360 err = selinux_xfrm_sec_ctx_alloc(&x->security, uctx, secid); 366 ctx->ctx_doi = XFRM_SC_DOI_LSM;
361 if (err == 0) 367 ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
362 atomic_inc(&selinux_xfrm_refcount); 368 ctx->ctx_sid = secid;
363 return err; 369 ctx->ctx_len = str_len;
370 memcpy(ctx->ctx_str, ctx_str, str_len);
371
372 x->security = ctx;
373 atomic_inc(&selinux_xfrm_refcount);
374out:
375 kfree(ctx_str);
376 return rc;
364} 377}
365 378
366/* 379/*
@@ -368,24 +381,15 @@ int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uct
368 */ 381 */
369void selinux_xfrm_state_free(struct xfrm_state *x) 382void selinux_xfrm_state_free(struct xfrm_state *x)
370{ 383{
371 atomic_dec(&selinux_xfrm_refcount); 384 selinux_xfrm_free(x->security);
372 kfree(x->security);
373} 385}
374 386
375 /* 387/*
376 * LSM hook implementation that authorizes deletion of labeled SAs. 388 * LSM hook implementation that authorizes deletion of labeled SAs.
377 */ 389 */
378int selinux_xfrm_state_delete(struct xfrm_state *x) 390int selinux_xfrm_state_delete(struct xfrm_state *x)
379{ 391{
380 const struct task_security_struct *tsec = current_security(); 392 return selinux_xfrm_delete(x->security);
381 struct xfrm_sec_ctx *ctx = x->security;
382
383 if (!ctx)
384 return 0;
385
386 return avc_has_perm(tsec->sid, ctx->ctx_sid,
387 SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
388 NULL);
389} 393}
390 394
391/* 395/*
@@ -395,14 +399,12 @@ int selinux_xfrm_state_delete(struct xfrm_state *x)
395 * we need to check for unlabelled access since this may not have 399 * we need to check for unlabelled access since this may not have
396 * gone thru the IPSec process. 400 * gone thru the IPSec process.
397 */ 401 */
398int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb, 402int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
399 struct common_audit_data *ad) 403 struct common_audit_data *ad)
400{ 404{
401 int i, rc = 0; 405 int i;
402 struct sec_path *sp; 406 struct sec_path *sp = skb->sp;
403 u32 sel_sid = SECINITSID_UNLABELED; 407 u32 peer_sid = SECINITSID_UNLABELED;
404
405 sp = skb->sp;
406 408
407 if (sp) { 409 if (sp) {
408 for (i = 0; i < sp->len; i++) { 410 for (i = 0; i < sp->len; i++) {
@@ -410,23 +412,17 @@ int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
410 412
411 if (x && selinux_authorizable_xfrm(x)) { 413 if (x && selinux_authorizable_xfrm(x)) {
412 struct xfrm_sec_ctx *ctx = x->security; 414 struct xfrm_sec_ctx *ctx = x->security;
413 sel_sid = ctx->ctx_sid; 415 peer_sid = ctx->ctx_sid;
414 break; 416 break;
415 } 417 }
416 } 418 }
417 } 419 }
418 420
419 /* 421 /* This check even when there's no association involved is intended,
420 * This check even when there's no association involved is 422 * according to Trent Jaeger, to make sure a process can't engage in
421 * intended, according to Trent Jaeger, to make sure a 423 * non-IPsec communication unless explicitly allowed by policy. */
422 * process can't engage in non-ipsec communication unless 424 return avc_has_perm(sk_sid, peer_sid,
423 * explicitly allowed by policy. 425 SECCLASS_ASSOCIATION, ASSOCIATION__RECVFROM, ad);
424 */
425
426 rc = avc_has_perm(isec_sid, sel_sid, SECCLASS_ASSOCIATION,
427 ASSOCIATION__RECVFROM, ad);
428
429 return rc;
430} 426}
431 427
432/* 428/*
@@ -436,49 +432,38 @@ int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
436 * If we do have a authorizable security association, then it has already been 432 * If we do have a authorizable security association, then it has already been
437 * checked in the selinux_xfrm_state_pol_flow_match hook above. 433 * checked in the selinux_xfrm_state_pol_flow_match hook above.
438 */ 434 */
439int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb, 435int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
440 struct common_audit_data *ad, u8 proto) 436 struct common_audit_data *ad, u8 proto)
441{ 437{
442 struct dst_entry *dst; 438 struct dst_entry *dst;
443 int rc = 0;
444
445 dst = skb_dst(skb);
446
447 if (dst) {
448 struct dst_entry *dst_test;
449
450 for (dst_test = dst; dst_test != NULL;
451 dst_test = dst_test->child) {
452 struct xfrm_state *x = dst_test->xfrm;
453
454 if (x && selinux_authorizable_xfrm(x))
455 goto out;
456 }
457 }
458 439
459 switch (proto) { 440 switch (proto) {
460 case IPPROTO_AH: 441 case IPPROTO_AH:
461 case IPPROTO_ESP: 442 case IPPROTO_ESP:
462 case IPPROTO_COMP: 443 case IPPROTO_COMP:
463 /* 444 /* We should have already seen this packet once before it
464 * We should have already seen this packet once before 445 * underwent xfrm(s). No need to subject it to the unlabeled
465 * it underwent xfrm(s). No need to subject it to the 446 * check. */
466 * unlabeled check. 447 return 0;
467 */
468 goto out;
469 default: 448 default:
470 break; 449 break;
471 } 450 }
472 451
473 /* 452 dst = skb_dst(skb);
474 * This check even when there's no association involved is 453 if (dst) {
475 * intended, according to Trent Jaeger, to make sure a 454 struct dst_entry *iter;
476 * process can't engage in non-ipsec communication unless
477 * explicitly allowed by policy.
478 */
479 455
480 rc = avc_has_perm(isec_sid, SECINITSID_UNLABELED, SECCLASS_ASSOCIATION, 456 for (iter = dst; iter != NULL; iter = iter->child) {
481 ASSOCIATION__SENDTO, ad); 457 struct xfrm_state *x = iter->xfrm;
482out: 458
483 return rc; 459 if (x && selinux_authorizable_xfrm(x))
460 return 0;
461 }
462 }
463
464 /* This check even when there's no association involved is intended,
465 * according to Trent Jaeger, to make sure a process can't engage in
466 * non-IPsec communication unless explicitly allowed by policy. */
467 return avc_has_perm(sk_sid, SECINITSID_UNLABELED,
468 SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, ad);
484} 469}
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 076b8e8a51ab..364cc64fce71 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -177,9 +177,13 @@ struct smk_port_label {
177#define SMACK_CIPSO_MAXCATNUM 184 /* 23 * 8 */ 177#define SMACK_CIPSO_MAXCATNUM 184 /* 23 * 8 */
178 178
179/* 179/*
180 * Flag for transmute access 180 * Flags for untraditional access modes.
181 * It shouldn't be necessary to avoid conflicts with definitions
182 * in fs.h, but do so anyway.
181 */ 183 */
182#define MAY_TRANSMUTE 64 184#define MAY_TRANSMUTE 0x00001000 /* Controls directory labeling */
185#define MAY_LOCK 0x00002000 /* Locks should be writes, but ... */
186
183/* 187/*
184 * Just to make the common cases easier to deal with 188 * Just to make the common cases easier to deal with
185 */ 189 */
@@ -188,9 +192,9 @@ struct smk_port_label {
188#define MAY_NOT 0 192#define MAY_NOT 0
189 193
190/* 194/*
191 * Number of access types used by Smack (rwxat) 195 * Number of access types used by Smack (rwxatl)
192 */ 196 */
193#define SMK_NUM_ACCESS_TYPE 5 197#define SMK_NUM_ACCESS_TYPE 6
194 198
195/* SMACK data */ 199/* SMACK data */
196struct smack_audit_data { 200struct smack_audit_data {
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index b3b59b1e93d6..14293cd9b1e5 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -84,6 +84,8 @@ int log_policy = SMACK_AUDIT_DENIED;
84 * 84 *
85 * Do the object check first because that is more 85 * Do the object check first because that is more
86 * likely to differ. 86 * likely to differ.
87 *
88 * Allowing write access implies allowing locking.
87 */ 89 */
88int smk_access_entry(char *subject_label, char *object_label, 90int smk_access_entry(char *subject_label, char *object_label,
89 struct list_head *rule_list) 91 struct list_head *rule_list)
@@ -99,6 +101,11 @@ int smk_access_entry(char *subject_label, char *object_label,
99 } 101 }
100 } 102 }
101 103
104 /*
105 * MAY_WRITE implies MAY_LOCK.
106 */
107 if ((may & MAY_WRITE) == MAY_WRITE)
108 may |= MAY_LOCK;
102 return may; 109 return may;
103} 110}
104 111
@@ -245,6 +252,7 @@ out_audit:
245static inline void smack_str_from_perm(char *string, int access) 252static inline void smack_str_from_perm(char *string, int access)
246{ 253{
247 int i = 0; 254 int i = 0;
255
248 if (access & MAY_READ) 256 if (access & MAY_READ)
249 string[i++] = 'r'; 257 string[i++] = 'r';
250 if (access & MAY_WRITE) 258 if (access & MAY_WRITE)
@@ -255,6 +263,8 @@ static inline void smack_str_from_perm(char *string, int access)
255 string[i++] = 'a'; 263 string[i++] = 'a';
256 if (access & MAY_TRANSMUTE) 264 if (access & MAY_TRANSMUTE)
257 string[i++] = 't'; 265 string[i++] = 't';
266 if (access & MAY_LOCK)
267 string[i++] = 'l';
258 string[i] = '\0'; 268 string[i] = '\0';
259} 269}
260/** 270/**
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 8825375cc031..b0be893ad44d 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -185,7 +185,7 @@ static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
185 smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK); 185 smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
186 smk_ad_setfield_u_tsk(&ad, ctp); 186 smk_ad_setfield_u_tsk(&ad, ctp);
187 187
188 rc = smk_curacc(skp->smk_known, MAY_READWRITE, &ad); 188 rc = smk_curacc(skp->smk_known, mode, &ad);
189 return rc; 189 return rc;
190} 190}
191 191
@@ -1146,7 +1146,7 @@ static int smack_file_ioctl(struct file *file, unsigned int cmd,
1146 * @file: the object 1146 * @file: the object
1147 * @cmd: unused 1147 * @cmd: unused
1148 * 1148 *
1149 * Returns 0 if current has write access, error code otherwise 1149 * Returns 0 if current has lock access, error code otherwise
1150 */ 1150 */
1151static int smack_file_lock(struct file *file, unsigned int cmd) 1151static int smack_file_lock(struct file *file, unsigned int cmd)
1152{ 1152{
@@ -1154,7 +1154,7 @@ static int smack_file_lock(struct file *file, unsigned int cmd)
1154 1154
1155 smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); 1155 smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
1156 smk_ad_setfield_u_fs_path(&ad, file->f_path); 1156 smk_ad_setfield_u_fs_path(&ad, file->f_path);
1157 return smk_curacc(file->f_security, MAY_WRITE, &ad); 1157 return smk_curacc(file->f_security, MAY_LOCK, &ad);
1158} 1158}
1159 1159
1160/** 1160/**
@@ -1178,8 +1178,13 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd,
1178 1178
1179 switch (cmd) { 1179 switch (cmd) {
1180 case F_GETLK: 1180 case F_GETLK:
1181 break;
1181 case F_SETLK: 1182 case F_SETLK:
1182 case F_SETLKW: 1183 case F_SETLKW:
1184 smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
1185 smk_ad_setfield_u_fs_path(&ad, file->f_path);
1186 rc = smk_curacc(file->f_security, MAY_LOCK, &ad);
1187 break;
1183 case F_SETOWN: 1188 case F_SETOWN:
1184 case F_SETSIG: 1189 case F_SETSIG:
1185 smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); 1190 smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 80f4b4a45725..160aa08e3cd5 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -139,7 +139,7 @@ const char *smack_cipso_option = SMACK_CIPSO_OPTION;
139 * SMK_LOADLEN: Smack rule length 139 * SMK_LOADLEN: Smack rule length
140 */ 140 */
141#define SMK_OACCESS "rwxa" 141#define SMK_OACCESS "rwxa"
142#define SMK_ACCESS "rwxat" 142#define SMK_ACCESS "rwxatl"
143#define SMK_OACCESSLEN (sizeof(SMK_OACCESS) - 1) 143#define SMK_OACCESSLEN (sizeof(SMK_OACCESS) - 1)
144#define SMK_ACCESSLEN (sizeof(SMK_ACCESS) - 1) 144#define SMK_ACCESSLEN (sizeof(SMK_ACCESS) - 1)
145#define SMK_OLOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_OACCESSLEN) 145#define SMK_OLOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_OACCESSLEN)
@@ -282,6 +282,10 @@ static int smk_perm_from_str(const char *string)
282 case 'T': 282 case 'T':
283 perm |= MAY_TRANSMUTE; 283 perm |= MAY_TRANSMUTE;
284 break; 284 break;
285 case 'l':
286 case 'L':
287 perm |= MAY_LOCK;
288 break;
285 default: 289 default:
286 return perm; 290 return perm;
287 } 291 }
@@ -452,7 +456,7 @@ static ssize_t smk_write_rules_list(struct file *file, const char __user *buf,
452 /* 456 /*
453 * Minor hack for backward compatibility 457 * Minor hack for backward compatibility
454 */ 458 */
455 if (count != SMK_OLOADLEN && count != SMK_LOADLEN) 459 if (count < SMK_OLOADLEN || count > SMK_LOADLEN)
456 return -EINVAL; 460 return -EINVAL;
457 } else { 461 } else {
458 if (count >= PAGE_SIZE) { 462 if (count >= PAGE_SIZE) {
@@ -592,6 +596,8 @@ static void smk_rule_show(struct seq_file *s, struct smack_rule *srp, int max)
592 seq_putc(s, 'a'); 596 seq_putc(s, 'a');
593 if (srp->smk_access & MAY_TRANSMUTE) 597 if (srp->smk_access & MAY_TRANSMUTE)
594 seq_putc(s, 't'); 598 seq_putc(s, 't');
599 if (srp->smk_access & MAY_LOCK)
600 seq_putc(s, 'l');
595 601
596 seq_putc(s, '\n'); 602 seq_putc(s, '\n');
597} 603}
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c
index 872d59e35ee2..721d8fd45685 100644
--- a/sound/atmel/abdac.c
+++ b/sound/atmel/abdac.c
@@ -357,7 +357,8 @@ static int set_sample_rates(struct atmel_abdac *dac)
357 if (new_rate < 0) 357 if (new_rate < 0)
358 break; 358 break;
359 /* make sure we are below the ABDAC clock */ 359 /* make sure we are below the ABDAC clock */
360 if (new_rate <= clk_get_rate(dac->pclk)) { 360 if (index < MAX_NUM_RATES &&
361 new_rate <= clk_get_rate(dac->pclk)) {
361 dac->rates[index] = new_rate / 256; 362 dac->rates[index] = new_rate / 256;
362 index++; 363 index++;
363 } 364 }
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
index d3226892ad6b..9048777228e2 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -434,17 +434,14 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
434 return; 434 return;
435 index = s->packet_index; 435 index = s->packet_index;
436 436
437 /* this module generate empty packet for 'no data' */
437 syt = calculate_syt(s, cycle); 438 syt = calculate_syt(s, cycle);
438 if (!(s->flags & CIP_BLOCKING)) { 439 if (!(s->flags & CIP_BLOCKING))
439 data_blocks = calculate_data_blocks(s); 440 data_blocks = calculate_data_blocks(s);
440 } else { 441 else if (syt != 0xffff)
441 if (syt != 0xffff) { 442 data_blocks = s->syt_interval;
442 data_blocks = s->syt_interval; 443 else
443 } else { 444 data_blocks = 0;
444 data_blocks = 0;
445 syt = 0xffffff;
446 }
447 }
448 445
449 buffer = s->buffer.packets[index].buffer; 446 buffer = s->buffer.packets[index].buffer;
450 buffer[0] = cpu_to_be32(ACCESS_ONCE(s->source_node_id_field) | 447 buffer[0] = cpu_to_be32(ACCESS_ONCE(s->source_node_id_field) |
diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
index 839ebf812d79..2746ecd291af 100644
--- a/sound/firewire/amdtp.h
+++ b/sound/firewire/amdtp.h
@@ -4,6 +4,7 @@
4#include <linux/err.h> 4#include <linux/err.h>
5#include <linux/interrupt.h> 5#include <linux/interrupt.h>
6#include <linux/mutex.h> 6#include <linux/mutex.h>
7#include <sound/asound.h>
7#include "packets-buffer.h" 8#include "packets-buffer.h"
8 9
9/** 10/**
diff --git a/sound/firewire/dice.c b/sound/firewire/dice.c
index 57bcd31fcc12..c0aa64941cee 100644
--- a/sound/firewire/dice.c
+++ b/sound/firewire/dice.c
@@ -1019,7 +1019,7 @@ static void dice_proc_read(struct snd_info_entry *entry,
1019 1019
1020 if (dice_proc_read_mem(dice, &tx_rx_header, sections[2], 2) < 0) 1020 if (dice_proc_read_mem(dice, &tx_rx_header, sections[2], 2) < 0)
1021 return; 1021 return;
1022 quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.tx)); 1022 quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.tx) / 4);
1023 for (stream = 0; stream < tx_rx_header.number; ++stream) { 1023 for (stream = 0; stream < tx_rx_header.number; ++stream) {
1024 if (dice_proc_read_mem(dice, &buf.tx, sections[2] + 2 + 1024 if (dice_proc_read_mem(dice, &buf.tx, sections[2] + 2 +
1025 stream * tx_rx_header.size, 1025 stream * tx_rx_header.size,
@@ -1045,7 +1045,7 @@ static void dice_proc_read(struct snd_info_entry *entry,
1045 1045
1046 if (dice_proc_read_mem(dice, &tx_rx_header, sections[4], 2) < 0) 1046 if (dice_proc_read_mem(dice, &tx_rx_header, sections[4], 2) < 0)
1047 return; 1047 return;
1048 quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.rx)); 1048 quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.rx) / 4);
1049 for (stream = 0; stream < tx_rx_header.number; ++stream) { 1049 for (stream = 0; stream < tx_rx_header.number; ++stream) {
1050 if (dice_proc_read_mem(dice, &buf.rx, sections[4] + 2 + 1050 if (dice_proc_read_mem(dice, &buf.rx, sections[4] + 2 +
1051 stream * tx_rx_header.size, 1051 stream * tx_rx_header.size,
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 8de66ccd7279..4cdd9ded4563 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -209,8 +209,9 @@ config SND_HDA_CODEC_CA0132
209 209
210config SND_HDA_CODEC_CA0132_DSP 210config SND_HDA_CODEC_CA0132_DSP
211 bool "Support new DSP code for CA0132 codec" 211 bool "Support new DSP code for CA0132 codec"
212 depends on SND_HDA_CODEC_CA0132 && FW_LOADER 212 depends on SND_HDA_CODEC_CA0132
213 select SND_HDA_DSP_LOADER 213 select SND_HDA_DSP_LOADER
214 select FW_LOADER
214 help 215 help
215 Say Y here to enable the DSP for Creative CA0132 for extended 216 Say Y here to enable the DSP for Creative CA0132 for extended
216 features like equalizer or echo cancellation. 217 features like equalizer or echo cancellation.
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index afb90f48867f..69178c4f4113 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4000,6 +4000,10 @@ static void hda_call_codec_resume(struct hda_codec *codec)
4000 * in the resume / power-save sequence 4000 * in the resume / power-save sequence
4001 */ 4001 */
4002 hda_keep_power_on(codec); 4002 hda_keep_power_on(codec);
4003 if (codec->pm_down_notified) {
4004 codec->pm_down_notified = 0;
4005 hda_call_pm_notify(codec->bus, true);
4006 }
4003 hda_set_power_state(codec, AC_PWRST_D0); 4007 hda_set_power_state(codec, AC_PWRST_D0);
4004 restore_shutup_pins(codec); 4008 restore_shutup_pins(codec);
4005 hda_exec_init_verbs(codec); 4009 hda_exec_init_verbs(codec);
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 77db69480c19..7aa9870040c1 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -698,7 +698,6 @@ struct hda_bus {
698 unsigned int in_reset:1; /* during reset operation */ 698 unsigned int in_reset:1; /* during reset operation */
699 unsigned int power_keep_link_on:1; /* don't power off HDA link */ 699 unsigned int power_keep_link_on:1; /* don't power off HDA link */
700 unsigned int no_response_fallback:1; /* don't fallback at RIRB error */ 700 unsigned int no_response_fallback:1; /* don't fallback at RIRB error */
701 unsigned int avoid_link_reset:1; /* don't reset link at runtime PM */
702 701
703 int primary_dig_out_type; /* primary digital out PCM type */ 702 int primary_dig_out_type; /* primary digital out PCM type */
704}; 703};
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 3067ed4fe3b2..c7f6d1cab606 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -474,6 +474,20 @@ static void invalidate_nid_path(struct hda_codec *codec, int idx)
474 memset(path, 0, sizeof(*path)); 474 memset(path, 0, sizeof(*path));
475} 475}
476 476
477/* return a DAC if paired to the given pin by codec driver */
478static hda_nid_t get_preferred_dac(struct hda_codec *codec, hda_nid_t pin)
479{
480 struct hda_gen_spec *spec = codec->spec;
481 const hda_nid_t *list = spec->preferred_dacs;
482
483 if (!list)
484 return 0;
485 for (; *list; list += 2)
486 if (*list == pin)
487 return list[1];
488 return 0;
489}
490
477/* look for an empty DAC slot */ 491/* look for an empty DAC slot */
478static hda_nid_t look_for_dac(struct hda_codec *codec, hda_nid_t pin, 492static hda_nid_t look_for_dac(struct hda_codec *codec, hda_nid_t pin,
479 bool is_digital) 493 bool is_digital)
@@ -1192,7 +1206,14 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs,
1192 continue; 1206 continue;
1193 } 1207 }
1194 1208
1195 dacs[i] = look_for_dac(codec, pin, false); 1209 dacs[i] = get_preferred_dac(codec, pin);
1210 if (dacs[i]) {
1211 if (is_dac_already_used(codec, dacs[i]))
1212 badness += bad->shared_primary;
1213 }
1214
1215 if (!dacs[i])
1216 dacs[i] = look_for_dac(codec, pin, false);
1196 if (!dacs[i] && !i) { 1217 if (!dacs[i] && !i) {
1197 /* try to steal the DAC of surrounds for the front */ 1218 /* try to steal the DAC of surrounds for the front */
1198 for (j = 1; j < num_outs; j++) { 1219 for (j = 1; j < num_outs; j++) {
@@ -2506,12 +2527,8 @@ static int create_out_jack_modes(struct hda_codec *codec, int num_pins,
2506 2527
2507 for (i = 0; i < num_pins; i++) { 2528 for (i = 0; i < num_pins; i++) {
2508 hda_nid_t pin = pins[i]; 2529 hda_nid_t pin = pins[i];
2509 if (pin == spec->hp_mic_pin) { 2530 if (pin == spec->hp_mic_pin)
2510 int ret = create_hp_mic_jack_mode(codec, pin);
2511 if (ret < 0)
2512 return ret;
2513 continue; 2531 continue;
2514 }
2515 if (get_out_jack_num_items(codec, pin) > 1) { 2532 if (get_out_jack_num_items(codec, pin) > 1) {
2516 struct snd_kcontrol_new *knew; 2533 struct snd_kcontrol_new *knew;
2517 char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; 2534 char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
@@ -2764,7 +2781,7 @@ static int hp_mic_jack_mode_put(struct snd_kcontrol *kcontrol,
2764 val &= ~(AC_PINCTL_VREFEN | PIN_HP); 2781 val &= ~(AC_PINCTL_VREFEN | PIN_HP);
2765 val |= get_vref_idx(vref_caps, idx) | PIN_IN; 2782 val |= get_vref_idx(vref_caps, idx) | PIN_IN;
2766 } else 2783 } else
2767 val = snd_hda_get_default_vref(codec, nid); 2784 val = snd_hda_get_default_vref(codec, nid) | PIN_IN;
2768 } 2785 }
2769 snd_hda_set_pin_ctl_cache(codec, nid, val); 2786 snd_hda_set_pin_ctl_cache(codec, nid, val);
2770 call_hp_automute(codec, NULL); 2787 call_hp_automute(codec, NULL);
@@ -2784,9 +2801,6 @@ static int create_hp_mic_jack_mode(struct hda_codec *codec, hda_nid_t pin)
2784 struct hda_gen_spec *spec = codec->spec; 2801 struct hda_gen_spec *spec = codec->spec;
2785 struct snd_kcontrol_new *knew; 2802 struct snd_kcontrol_new *knew;
2786 2803
2787 if (get_out_jack_num_items(codec, pin) <= 1 &&
2788 get_in_jack_num_items(codec, pin) <= 1)
2789 return 0; /* no need */
2790 knew = snd_hda_gen_add_kctl(spec, "Headphone Mic Jack Mode", 2804 knew = snd_hda_gen_add_kctl(spec, "Headphone Mic Jack Mode",
2791 &hp_mic_jack_mode_enum); 2805 &hp_mic_jack_mode_enum);
2792 if (!knew) 2806 if (!knew)
@@ -2815,6 +2829,42 @@ static int add_loopback_list(struct hda_gen_spec *spec, hda_nid_t mix, int idx)
2815 return 0; 2829 return 0;
2816} 2830}
2817 2831
2832/* return true if either a volume or a mute amp is found for the given
2833 * aamix path; the amp has to be either in the mixer node or its direct leaf
2834 */
2835static bool look_for_mix_leaf_ctls(struct hda_codec *codec, hda_nid_t mix_nid,
2836 hda_nid_t pin, unsigned int *mix_val,
2837 unsigned int *mute_val)
2838{
2839 int idx, num_conns;
2840 const hda_nid_t *list;
2841 hda_nid_t nid;
2842
2843 idx = snd_hda_get_conn_index(codec, mix_nid, pin, true);
2844 if (idx < 0)
2845 return false;
2846
2847 *mix_val = *mute_val = 0;
2848 if (nid_has_volume(codec, mix_nid, HDA_INPUT))
2849 *mix_val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT);
2850 if (nid_has_mute(codec, mix_nid, HDA_INPUT))
2851 *mute_val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT);
2852 if (*mix_val && *mute_val)
2853 return true;
2854
2855 /* check leaf node */
2856 num_conns = snd_hda_get_conn_list(codec, mix_nid, &list);
2857 if (num_conns < idx)
2858 return false;
2859 nid = list[idx];
2860 if (!*mix_val && nid_has_volume(codec, nid, HDA_OUTPUT))
2861 *mix_val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
2862 if (!*mute_val && nid_has_mute(codec, nid, HDA_OUTPUT))
2863 *mute_val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
2864
2865 return *mix_val || *mute_val;
2866}
2867
2818/* create input playback/capture controls for the given pin */ 2868/* create input playback/capture controls for the given pin */
2819static int new_analog_input(struct hda_codec *codec, int input_idx, 2869static int new_analog_input(struct hda_codec *codec, int input_idx,
2820 hda_nid_t pin, const char *ctlname, int ctlidx, 2870 hda_nid_t pin, const char *ctlname, int ctlidx,
@@ -2822,12 +2872,11 @@ static int new_analog_input(struct hda_codec *codec, int input_idx,
2822{ 2872{
2823 struct hda_gen_spec *spec = codec->spec; 2873 struct hda_gen_spec *spec = codec->spec;
2824 struct nid_path *path; 2874 struct nid_path *path;
2825 unsigned int val; 2875 unsigned int mix_val, mute_val;
2826 int err, idx; 2876 int err, idx;
2827 2877
2828 if (!nid_has_volume(codec, mix_nid, HDA_INPUT) && 2878 if (!look_for_mix_leaf_ctls(codec, mix_nid, pin, &mix_val, &mute_val))
2829 !nid_has_mute(codec, mix_nid, HDA_INPUT)) 2879 return 0;
2830 return 0; /* no need for analog loopback */
2831 2880
2832 path = snd_hda_add_new_path(codec, pin, mix_nid, 0); 2881 path = snd_hda_add_new_path(codec, pin, mix_nid, 0);
2833 if (!path) 2882 if (!path)
@@ -2836,20 +2885,18 @@ static int new_analog_input(struct hda_codec *codec, int input_idx,
2836 spec->loopback_paths[input_idx] = snd_hda_get_path_idx(codec, path); 2885 spec->loopback_paths[input_idx] = snd_hda_get_path_idx(codec, path);
2837 2886
2838 idx = path->idx[path->depth - 1]; 2887 idx = path->idx[path->depth - 1];
2839 if (nid_has_volume(codec, mix_nid, HDA_INPUT)) { 2888 if (mix_val) {
2840 val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT); 2889 err = __add_pb_vol_ctrl(spec, HDA_CTL_WIDGET_VOL, ctlname, ctlidx, mix_val);
2841 err = __add_pb_vol_ctrl(spec, HDA_CTL_WIDGET_VOL, ctlname, ctlidx, val);
2842 if (err < 0) 2890 if (err < 0)
2843 return err; 2891 return err;
2844 path->ctls[NID_PATH_VOL_CTL] = val; 2892 path->ctls[NID_PATH_VOL_CTL] = mix_val;
2845 } 2893 }
2846 2894
2847 if (nid_has_mute(codec, mix_nid, HDA_INPUT)) { 2895 if (mute_val) {
2848 val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT); 2896 err = __add_pb_sw_ctrl(spec, HDA_CTL_WIDGET_MUTE, ctlname, ctlidx, mute_val);
2849 err = __add_pb_sw_ctrl(spec, HDA_CTL_WIDGET_MUTE, ctlname, ctlidx, val);
2850 if (err < 0) 2897 if (err < 0)
2851 return err; 2898 return err;
2852 path->ctls[NID_PATH_MUTE_CTL] = val; 2899 path->ctls[NID_PATH_MUTE_CTL] = mute_val;
2853 } 2900 }
2854 2901
2855 path->active = true; 2902 path->active = true;
@@ -4271,6 +4318,26 @@ static unsigned int snd_hda_gen_path_power_filter(struct hda_codec *codec,
4271 return AC_PWRST_D3; 4318 return AC_PWRST_D3;
4272} 4319}
4273 4320
4321/* mute all aamix inputs initially; parse up to the first leaves */
4322static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix)
4323{
4324 int i, nums;
4325 const hda_nid_t *conn;
4326 bool has_amp;
4327
4328 nums = snd_hda_get_conn_list(codec, mix, &conn);
4329 has_amp = nid_has_mute(codec, mix, HDA_INPUT);
4330 for (i = 0; i < nums; i++) {
4331 if (has_amp)
4332 snd_hda_codec_amp_stereo(codec, mix,
4333 HDA_INPUT, i,
4334 0xff, HDA_AMP_MUTE);
4335 else if (nid_has_volume(codec, conn[i], HDA_OUTPUT))
4336 snd_hda_codec_amp_stereo(codec, conn[i],
4337 HDA_OUTPUT, 0,
4338 0xff, HDA_AMP_MUTE);
4339 }
4340}
4274 4341
4275/* 4342/*
4276 * Parse the given BIOS configuration and set up the hda_gen_spec 4343 * Parse the given BIOS configuration and set up the hda_gen_spec
@@ -4383,6 +4450,17 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
4383 if (err < 0) 4450 if (err < 0)
4384 return err; 4451 return err;
4385 4452
4453 /* create "Headphone Mic Jack Mode" if no input selection is
4454 * available (or user specifies add_jack_modes hint)
4455 */
4456 if (spec->hp_mic_pin &&
4457 (spec->auto_mic || spec->input_mux.num_items == 1 ||
4458 spec->add_jack_modes)) {
4459 err = create_hp_mic_jack_mode(codec, spec->hp_mic_pin);
4460 if (err < 0)
4461 return err;
4462 }
4463
4386 if (spec->add_jack_modes) { 4464 if (spec->add_jack_modes) {
4387 if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) { 4465 if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
4388 err = create_out_jack_modes(codec, cfg->line_outs, 4466 err = create_out_jack_modes(codec, cfg->line_outs,
@@ -4398,6 +4476,10 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
4398 } 4476 }
4399 } 4477 }
4400 4478
4479 /* mute all aamix input initially */
4480 if (spec->mixer_nid)
4481 mute_all_mixer_nid(codec, spec->mixer_nid);
4482
4401 dig_only: 4483 dig_only:
4402 parse_digital(codec); 4484 parse_digital(codec);
4403 4485
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 7e45cb44d151..0929a06df812 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -249,6 +249,9 @@ struct hda_gen_spec {
249 const struct badness_table *main_out_badness; 249 const struct badness_table *main_out_badness;
250 const struct badness_table *extra_out_badness; 250 const struct badness_table *extra_out_badness;
251 251
252 /* preferred pin/DAC pairs; an array of paired NIDs */
253 const hda_nid_t *preferred_dacs;
254
252 /* loopback mixing mode */ 255 /* loopback mixing mode */
253 bool aamix_mode; 256 bool aamix_mode;
254 257
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 7a09404579a7..27aa14007cbd 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2994,8 +2994,7 @@ static int azx_runtime_suspend(struct device *dev)
2994 STATESTS_INT_MASK); 2994 STATESTS_INT_MASK);
2995 2995
2996 azx_stop_chip(chip); 2996 azx_stop_chip(chip);
2997 if (!chip->bus->avoid_link_reset) 2997 azx_enter_link_reset(chip);
2998 azx_enter_link_reset(chip);
2999 azx_clear_irq_pending(chip); 2998 azx_clear_irq_pending(chip);
3000 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) 2999 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
3001 hda_display_power(false); 3000 hda_display_power(false);
@@ -3877,7 +3876,8 @@ static int azx_probe(struct pci_dev *pci,
3877 } 3876 }
3878 3877
3879 dev++; 3878 dev++;
3880 complete_all(&chip->probe_wait); 3879 if (chip->disabled)
3880 complete_all(&chip->probe_wait);
3881 return 0; 3881 return 0;
3882 3882
3883out_free: 3883out_free:
@@ -3954,10 +3954,10 @@ static int azx_probe_continue(struct azx *chip)
3954 if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME) || chip->use_vga_switcheroo) 3954 if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME) || chip->use_vga_switcheroo)
3955 pm_runtime_put_noidle(&pci->dev); 3955 pm_runtime_put_noidle(&pci->dev);
3956 3956
3957 return 0;
3958
3959out_free: 3957out_free:
3960 chip->init_failed = 1; 3958 if (err < 0)
3959 chip->init_failed = 1;
3960 complete_all(&chip->probe_wait);
3961 return err; 3961 return err;
3962} 3962}
3963 3963
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 1a83559f4cbd..699262a3e07a 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -147,6 +147,8 @@ static void ad_vmaster_eapd_hook(void *private_data, int enabled)
147 147
148 if (!spec->eapd_nid) 148 if (!spec->eapd_nid)
149 return; 149 return;
150 if (codec->inv_eapd)
151 enabled = !enabled;
150 snd_hda_codec_update_cache(codec, spec->eapd_nid, 0, 152 snd_hda_codec_update_cache(codec, spec->eapd_nid, 0,
151 AC_VERB_SET_EAPD_BTLENABLE, 153 AC_VERB_SET_EAPD_BTLENABLE,
152 enabled ? 0x02 : 0x00); 154 enabled ? 0x02 : 0x00);
@@ -338,6 +340,14 @@ static int patch_ad1986a(struct hda_codec *codec)
338{ 340{
339 int err; 341 int err;
340 struct ad198x_spec *spec; 342 struct ad198x_spec *spec;
343 static hda_nid_t preferred_pairs[] = {
344 0x1a, 0x03,
345 0x1b, 0x03,
346 0x1c, 0x04,
347 0x1d, 0x05,
348 0x1e, 0x03,
349 0
350 };
341 351
342 err = alloc_ad_spec(codec); 352 err = alloc_ad_spec(codec);
343 if (err < 0) 353 if (err < 0)
@@ -358,6 +368,11 @@ static int patch_ad1986a(struct hda_codec *codec)
358 * So, let's disable the shared stream. 368 * So, let's disable the shared stream.
359 */ 369 */
360 spec->gen.multiout.no_share_stream = 1; 370 spec->gen.multiout.no_share_stream = 1;
371 /* give fixed DAC/pin pairs */
372 spec->gen.preferred_dacs = preferred_pairs;
373
374 /* AD1986A can't manage the dynamic pin on/off smoothly */
375 spec->gen.auto_mute_via_amp = 1;
361 376
362 snd_hda_pick_fixup(codec, ad1986a_fixup_models, ad1986a_fixup_tbl, 377 snd_hda_pick_fixup(codec, ad1986a_fixup_models, ad1986a_fixup_tbl,
363 ad1986a_fixups); 378 ad1986a_fixups);
@@ -962,6 +977,7 @@ static void ad1884_fixup_hp_eapd(struct hda_codec *codec,
962 switch (action) { 977 switch (action) {
963 case HDA_FIXUP_ACT_PRE_PROBE: 978 case HDA_FIXUP_ACT_PRE_PROBE:
964 spec->gen.vmaster_mute.hook = ad1884_vmaster_hp_gpio_hook; 979 spec->gen.vmaster_mute.hook = ad1884_vmaster_hp_gpio_hook;
980 spec->gen.own_eapd_ctl = 1;
965 snd_hda_sequence_write_cache(codec, gpio_init_verbs); 981 snd_hda_sequence_write_cache(codec, gpio_init_verbs);
966 break; 982 break;
967 case HDA_FIXUP_ACT_PROBE: 983 case HDA_FIXUP_ACT_PROBE:
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index c205bb1747fd..3fbf2883e06e 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -2936,7 +2936,6 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
2936 SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO), 2936 SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
2937 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), 2937 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
2938 SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD), 2938 SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
2939 SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
2940 SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), 2939 SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
2941 SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS), 2940 SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS),
2942 SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS), 2941 SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS),
@@ -3244,9 +3243,29 @@ enum {
3244#if IS_ENABLED(CONFIG_THINKPAD_ACPI) 3243#if IS_ENABLED(CONFIG_THINKPAD_ACPI)
3245 3244
3246#include <linux/thinkpad_acpi.h> 3245#include <linux/thinkpad_acpi.h>
3246#include <acpi/acpi.h>
3247 3247
3248static int (*led_set_func)(int, bool); 3248static int (*led_set_func)(int, bool);
3249 3249
3250static acpi_status acpi_check_cb(acpi_handle handle, u32 lvl, void *context,
3251 void **rv)
3252{
3253 bool *found = context;
3254 *found = true;
3255 return AE_OK;
3256}
3257
3258static bool is_thinkpad(struct hda_codec *codec)
3259{
3260 bool found = false;
3261 if (codec->subsystem_id >> 16 != 0x17aa)
3262 return false;
3263 if (ACPI_SUCCESS(acpi_get_devices("LEN0068", acpi_check_cb, &found, NULL)) && found)
3264 return true;
3265 found = false;
3266 return ACPI_SUCCESS(acpi_get_devices("IBM0068", acpi_check_cb, &found, NULL)) && found;
3267}
3268
3250static void update_tpacpi_mute_led(void *private_data, int enabled) 3269static void update_tpacpi_mute_led(void *private_data, int enabled)
3251{ 3270{
3252 struct hda_codec *codec = private_data; 3271 struct hda_codec *codec = private_data;
@@ -3279,6 +3298,8 @@ static void cxt_fixup_thinkpad_acpi(struct hda_codec *codec,
3279 bool removefunc = false; 3298 bool removefunc = false;
3280 3299
3281 if (action == HDA_FIXUP_ACT_PROBE) { 3300 if (action == HDA_FIXUP_ACT_PROBE) {
3301 if (!is_thinkpad(codec))
3302 return;
3282 if (!led_set_func) 3303 if (!led_set_func)
3283 led_set_func = symbol_request(tpacpi_led_set); 3304 led_set_func = symbol_request(tpacpi_led_set);
3284 if (!led_set_func) { 3305 if (!led_set_func) {
@@ -3494,6 +3515,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
3494 SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC), 3515 SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
3495 SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC), 3516 SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
3496 SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC), 3517 SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
3518 SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
3497 SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004), 3519 SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
3498 SND_PCI_QUIRK(0x1c06, 0x2012, "Lemote A1205", CXT_PINCFG_LEMOTE_A1205), 3520 SND_PCI_QUIRK(0x1c06, 0x2012, "Lemote A1205", CXT_PINCFG_LEMOTE_A1205),
3499 {} 3521 {}
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 08407bed093e..f281c8068557 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1142,32 +1142,34 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
1142 1142
1143static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll); 1143static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
1144 1144
1145static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res) 1145static void jack_callback(struct hda_codec *codec, struct hda_jack_tbl *jack)
1146{ 1146{
1147 struct hdmi_spec *spec = codec->spec; 1147 struct hdmi_spec *spec = codec->spec;
1148 int pin_idx = pin_nid_to_pin_index(spec, jack->nid);
1149 if (pin_idx < 0)
1150 return;
1151
1152 if (hdmi_present_sense(get_pin(spec, pin_idx), 1))
1153 snd_hda_jack_report_sync(codec);
1154}
1155
1156static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
1157{
1148 int tag = res >> AC_UNSOL_RES_TAG_SHIFT; 1158 int tag = res >> AC_UNSOL_RES_TAG_SHIFT;
1149 int pin_nid;
1150 int pin_idx;
1151 struct hda_jack_tbl *jack; 1159 struct hda_jack_tbl *jack;
1152 int dev_entry = (res & AC_UNSOL_RES_DE) >> AC_UNSOL_RES_DE_SHIFT; 1160 int dev_entry = (res & AC_UNSOL_RES_DE) >> AC_UNSOL_RES_DE_SHIFT;
1153 1161
1154 jack = snd_hda_jack_tbl_get_from_tag(codec, tag); 1162 jack = snd_hda_jack_tbl_get_from_tag(codec, tag);
1155 if (!jack) 1163 if (!jack)
1156 return; 1164 return;
1157 pin_nid = jack->nid;
1158 jack->jack_dirty = 1; 1165 jack->jack_dirty = 1;
1159 1166
1160 _snd_printd(SND_PR_VERBOSE, 1167 _snd_printd(SND_PR_VERBOSE,
1161 "HDMI hot plug event: Codec=%d Pin=%d Device=%d Inactive=%d Presence_Detect=%d ELD_Valid=%d\n", 1168 "HDMI hot plug event: Codec=%d Pin=%d Device=%d Inactive=%d Presence_Detect=%d ELD_Valid=%d\n",
1162 codec->addr, pin_nid, dev_entry, !!(res & AC_UNSOL_RES_IA), 1169 codec->addr, jack->nid, dev_entry, !!(res & AC_UNSOL_RES_IA),
1163 !!(res & AC_UNSOL_RES_PD), !!(res & AC_UNSOL_RES_ELDV)); 1170 !!(res & AC_UNSOL_RES_PD), !!(res & AC_UNSOL_RES_ELDV));
1164 1171
1165 pin_idx = pin_nid_to_pin_index(spec, pin_nid); 1172 jack_callback(codec, jack);
1166 if (pin_idx < 0)
1167 return;
1168
1169 if (hdmi_present_sense(get_pin(spec, pin_idx), 1))
1170 snd_hda_jack_report_sync(codec);
1171} 1173}
1172 1174
1173static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res) 1175static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -2095,7 +2097,8 @@ static int generic_hdmi_init(struct hda_codec *codec)
2095 hda_nid_t pin_nid = per_pin->pin_nid; 2097 hda_nid_t pin_nid = per_pin->pin_nid;
2096 2098
2097 hdmi_init_pin(codec, pin_nid); 2099 hdmi_init_pin(codec, pin_nid);
2098 snd_hda_jack_detect_enable(codec, pin_nid, pin_nid); 2100 snd_hda_jack_detect_enable_callback(codec, pin_nid, pin_nid,
2101 codec->jackpoll_interval > 0 ? jack_callback : NULL);
2099 } 2102 }
2100 return 0; 2103 return 0;
2101} 2104}
@@ -2334,8 +2337,9 @@ static int simple_playback_build_controls(struct hda_codec *codec)
2334 int err; 2337 int err;
2335 2338
2336 per_cvt = get_cvt(spec, 0); 2339 per_cvt = get_cvt(spec, 0);
2337 err = snd_hda_create_spdif_out_ctls(codec, per_cvt->cvt_nid, 2340 err = snd_hda_create_dig_out_ctls(codec, per_cvt->cvt_nid,
2338 per_cvt->cvt_nid); 2341 per_cvt->cvt_nid,
2342 HDA_PCM_TYPE_HDMI);
2339 if (err < 0) 2343 if (err < 0)
2340 return err; 2344 return err;
2341 return simple_hdmi_build_jack(codec, 0); 2345 return simple_hdmi_build_jack(codec, 0);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 04d1e6be600e..34de5dc2fe9b 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1512,6 +1512,7 @@ enum {
1512 ALC260_FIXUP_KN1, 1512 ALC260_FIXUP_KN1,
1513 ALC260_FIXUP_FSC_S7020, 1513 ALC260_FIXUP_FSC_S7020,
1514 ALC260_FIXUP_FSC_S7020_JWSE, 1514 ALC260_FIXUP_FSC_S7020_JWSE,
1515 ALC260_FIXUP_VAIO_PINS,
1515}; 1516};
1516 1517
1517static void alc260_gpio1_automute(struct hda_codec *codec) 1518static void alc260_gpio1_automute(struct hda_codec *codec)
@@ -1652,6 +1653,24 @@ static const struct hda_fixup alc260_fixups[] = {
1652 .chained = true, 1653 .chained = true,
1653 .chain_id = ALC260_FIXUP_FSC_S7020, 1654 .chain_id = ALC260_FIXUP_FSC_S7020,
1654 }, 1655 },
1656 [ALC260_FIXUP_VAIO_PINS] = {
1657 .type = HDA_FIXUP_PINS,
1658 .v.pins = (const struct hda_pintbl[]) {
1659 /* Pin configs are missing completely on some VAIOs */
1660 { 0x0f, 0x01211020 },
1661 { 0x10, 0x0001003f },
1662 { 0x11, 0x411111f0 },
1663 { 0x12, 0x01a15930 },
1664 { 0x13, 0x411111f0 },
1665 { 0x14, 0x411111f0 },
1666 { 0x15, 0x411111f0 },
1667 { 0x16, 0x411111f0 },
1668 { 0x17, 0x411111f0 },
1669 { 0x18, 0x411111f0 },
1670 { 0x19, 0x411111f0 },
1671 { }
1672 }
1673 },
1655}; 1674};
1656 1675
1657static const struct snd_pci_quirk alc260_fixup_tbl[] = { 1676static const struct snd_pci_quirk alc260_fixup_tbl[] = {
@@ -1660,6 +1679,8 @@ static const struct snd_pci_quirk alc260_fixup_tbl[] = {
1660 SND_PCI_QUIRK(0x1025, 0x008f, "Acer", ALC260_FIXUP_GPIO1), 1679 SND_PCI_QUIRK(0x1025, 0x008f, "Acer", ALC260_FIXUP_GPIO1),
1661 SND_PCI_QUIRK(0x103c, 0x280a, "HP dc5750", ALC260_FIXUP_HP_DC5750), 1680 SND_PCI_QUIRK(0x103c, 0x280a, "HP dc5750", ALC260_FIXUP_HP_DC5750),
1662 SND_PCI_QUIRK(0x103c, 0x30ba, "HP Presario B1900", ALC260_FIXUP_HP_B1900), 1681 SND_PCI_QUIRK(0x103c, 0x30ba, "HP Presario B1900", ALC260_FIXUP_HP_B1900),
1682 SND_PCI_QUIRK(0x104d, 0x81bb, "Sony VAIO", ALC260_FIXUP_VAIO_PINS),
1683 SND_PCI_QUIRK(0x104d, 0x81e2, "Sony VAIO TX", ALC260_FIXUP_HP_PIN_0F),
1663 SND_PCI_QUIRK(0x10cf, 0x1326, "FSC LifeBook S7020", ALC260_FIXUP_FSC_S7020), 1684 SND_PCI_QUIRK(0x10cf, 0x1326, "FSC LifeBook S7020", ALC260_FIXUP_FSC_S7020),
1664 SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FIXUP_GPIO1), 1685 SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FIXUP_GPIO1),
1665 SND_PCI_QUIRK(0x152d, 0x0729, "Quanta KN1", ALC260_FIXUP_KN1), 1686 SND_PCI_QUIRK(0x152d, 0x0729, "Quanta KN1", ALC260_FIXUP_KN1),
@@ -1759,8 +1780,11 @@ enum {
1759 ALC889_FIXUP_DAC_ROUTE, 1780 ALC889_FIXUP_DAC_ROUTE,
1760 ALC889_FIXUP_MBP_VREF, 1781 ALC889_FIXUP_MBP_VREF,
1761 ALC889_FIXUP_IMAC91_VREF, 1782 ALC889_FIXUP_IMAC91_VREF,
1783 ALC889_FIXUP_MBA21_VREF,
1762 ALC882_FIXUP_INV_DMIC, 1784 ALC882_FIXUP_INV_DMIC,
1763 ALC882_FIXUP_NO_PRIMARY_HP, 1785 ALC882_FIXUP_NO_PRIMARY_HP,
1786 ALC887_FIXUP_ASUS_BASS,
1787 ALC887_FIXUP_BASS_CHMAP,
1764}; 1788};
1765 1789
1766static void alc889_fixup_coef(struct hda_codec *codec, 1790static void alc889_fixup_coef(struct hda_codec *codec,
@@ -1861,17 +1885,13 @@ static void alc889_fixup_mbp_vref(struct hda_codec *codec,
1861 } 1885 }
1862} 1886}
1863 1887
1864/* Set VREF on speaker pins on imac91 */ 1888static void alc889_fixup_mac_pins(struct hda_codec *codec,
1865static void alc889_fixup_imac91_vref(struct hda_codec *codec, 1889 const hda_nid_t *nids, int num_nids)
1866 const struct hda_fixup *fix, int action)
1867{ 1890{
1868 struct alc_spec *spec = codec->spec; 1891 struct alc_spec *spec = codec->spec;
1869 static hda_nid_t nids[2] = { 0x18, 0x1a };
1870 int i; 1892 int i;
1871 1893
1872 if (action != HDA_FIXUP_ACT_INIT) 1894 for (i = 0; i < num_nids; i++) {
1873 return;
1874 for (i = 0; i < ARRAY_SIZE(nids); i++) {
1875 unsigned int val; 1895 unsigned int val;
1876 val = snd_hda_codec_get_pin_target(codec, nids[i]); 1896 val = snd_hda_codec_get_pin_target(codec, nids[i]);
1877 val |= AC_PINCTL_VREF_50; 1897 val |= AC_PINCTL_VREF_50;
@@ -1880,6 +1900,26 @@ static void alc889_fixup_imac91_vref(struct hda_codec *codec,
1880 spec->gen.keep_vref_in_automute = 1; 1900 spec->gen.keep_vref_in_automute = 1;
1881} 1901}
1882 1902
1903/* Set VREF on speaker pins on imac91 */
1904static void alc889_fixup_imac91_vref(struct hda_codec *codec,
1905 const struct hda_fixup *fix, int action)
1906{
1907 static hda_nid_t nids[2] = { 0x18, 0x1a };
1908
1909 if (action == HDA_FIXUP_ACT_INIT)
1910 alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids));
1911}
1912
1913/* Set VREF on speaker pins on mba21 */
1914static void alc889_fixup_mba21_vref(struct hda_codec *codec,
1915 const struct hda_fixup *fix, int action)
1916{
1917 static hda_nid_t nids[2] = { 0x18, 0x19 };
1918
1919 if (action == HDA_FIXUP_ACT_INIT)
1920 alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids));
1921}
1922
1883/* Don't take HP output as primary 1923/* Don't take HP output as primary
1884 * Strangely, the speaker output doesn't work on Vaio Z and some Vaio 1924 * Strangely, the speaker output doesn't work on Vaio Z and some Vaio
1885 * all-in-one desktop PCs (for example VGC-LN51JGB) through DAC 0x05 1925 * all-in-one desktop PCs (for example VGC-LN51JGB) through DAC 0x05
@@ -1894,6 +1934,9 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
1894 } 1934 }
1895} 1935}
1896 1936
1937static void alc_fixup_bass_chmap(struct hda_codec *codec,
1938 const struct hda_fixup *fix, int action);
1939
1897static const struct hda_fixup alc882_fixups[] = { 1940static const struct hda_fixup alc882_fixups[] = {
1898 [ALC882_FIXUP_ABIT_AW9D_MAX] = { 1941 [ALC882_FIXUP_ABIT_AW9D_MAX] = {
1899 .type = HDA_FIXUP_PINS, 1942 .type = HDA_FIXUP_PINS,
@@ -2076,6 +2119,12 @@ static const struct hda_fixup alc882_fixups[] = {
2076 .chained = true, 2119 .chained = true,
2077 .chain_id = ALC882_FIXUP_GPIO1, 2120 .chain_id = ALC882_FIXUP_GPIO1,
2078 }, 2121 },
2122 [ALC889_FIXUP_MBA21_VREF] = {
2123 .type = HDA_FIXUP_FUNC,
2124 .v.func = alc889_fixup_mba21_vref,
2125 .chained = true,
2126 .chain_id = ALC889_FIXUP_MBP_VREF,
2127 },
2079 [ALC882_FIXUP_INV_DMIC] = { 2128 [ALC882_FIXUP_INV_DMIC] = {
2080 .type = HDA_FIXUP_FUNC, 2129 .type = HDA_FIXUP_FUNC,
2081 .v.func = alc_fixup_inv_dmic_0x12, 2130 .v.func = alc_fixup_inv_dmic_0x12,
@@ -2084,6 +2133,19 @@ static const struct hda_fixup alc882_fixups[] = {
2084 .type = HDA_FIXUP_FUNC, 2133 .type = HDA_FIXUP_FUNC,
2085 .v.func = alc882_fixup_no_primary_hp, 2134 .v.func = alc882_fixup_no_primary_hp,
2086 }, 2135 },
2136 [ALC887_FIXUP_ASUS_BASS] = {
2137 .type = HDA_FIXUP_PINS,
2138 .v.pins = (const struct hda_pintbl[]) {
2139 {0x16, 0x99130130}, /* bass speaker */
2140 {}
2141 },
2142 .chained = true,
2143 .chain_id = ALC887_FIXUP_BASS_CHMAP,
2144 },
2145 [ALC887_FIXUP_BASS_CHMAP] = {
2146 .type = HDA_FIXUP_FUNC,
2147 .v.func = alc_fixup_bass_chmap,
2148 },
2087}; 2149};
2088 2150
2089static const struct snd_pci_quirk alc882_fixup_tbl[] = { 2151static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2117,6 +2179,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2117 SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V), 2179 SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V),
2118 SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC), 2180 SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
2119 SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601), 2181 SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
2182 SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
2120 SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), 2183 SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
2121 SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), 2184 SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
2122 SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), 2185 SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
@@ -2132,7 +2195,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2132 SND_PCI_QUIRK(0x106b, 0x3000, "iMac", ALC889_FIXUP_MBP_VREF), 2195 SND_PCI_QUIRK(0x106b, 0x3000, "iMac", ALC889_FIXUP_MBP_VREF),
2133 SND_PCI_QUIRK(0x106b, 0x3200, "iMac 7,1 Aluminum", ALC882_FIXUP_EAPD), 2196 SND_PCI_QUIRK(0x106b, 0x3200, "iMac 7,1 Aluminum", ALC882_FIXUP_EAPD),
2134 SND_PCI_QUIRK(0x106b, 0x3400, "MacBookAir 1,1", ALC889_FIXUP_MBP_VREF), 2197 SND_PCI_QUIRK(0x106b, 0x3400, "MacBookAir 1,1", ALC889_FIXUP_MBP_VREF),
2135 SND_PCI_QUIRK(0x106b, 0x3500, "MacBookAir 2,1", ALC889_FIXUP_MBP_VREF), 2198 SND_PCI_QUIRK(0x106b, 0x3500, "MacBookAir 2,1", ALC889_FIXUP_MBA21_VREF),
2136 SND_PCI_QUIRK(0x106b, 0x3600, "Macbook 3,1", ALC889_FIXUP_MBP_VREF), 2199 SND_PCI_QUIRK(0x106b, 0x3600, "Macbook 3,1", ALC889_FIXUP_MBP_VREF),
2137 SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF), 2200 SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF),
2138 SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_FIXUP_MACPRO_GPIO), 2201 SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_FIXUP_MACPRO_GPIO),
@@ -3247,6 +3310,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
3247 alc_write_coef_idx(codec, 0x18, 0x7388); 3310 alc_write_coef_idx(codec, 0x18, 0x7388);
3248 break; 3311 break;
3249 case 0x10ec0668: 3312 case 0x10ec0668:
3313 alc_write_coef_idx(codec, 0x11, 0x0001);
3250 alc_write_coef_idx(codec, 0x15, 0x0d60); 3314 alc_write_coef_idx(codec, 0x15, 0x0d60);
3251 alc_write_coef_idx(codec, 0xc3, 0x0000); 3315 alc_write_coef_idx(codec, 0xc3, 0x0000);
3252 break; 3316 break;
@@ -3275,6 +3339,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
3275 alc_write_coef_idx(codec, 0x18, 0x7388); 3339 alc_write_coef_idx(codec, 0x18, 0x7388);
3276 break; 3340 break;
3277 case 0x10ec0668: 3341 case 0x10ec0668:
3342 alc_write_coef_idx(codec, 0x11, 0x0001);
3278 alc_write_coef_idx(codec, 0x15, 0x0d50); 3343 alc_write_coef_idx(codec, 0x15, 0x0d50);
3279 alc_write_coef_idx(codec, 0xc3, 0x0000); 3344 alc_write_coef_idx(codec, 0xc3, 0x0000);
3280 break; 3345 break;
@@ -3393,7 +3458,7 @@ static void alc_update_headset_mode_hook(struct hda_codec *codec,
3393static void alc_update_headset_jack_cb(struct hda_codec *codec, struct hda_jack_tbl *jack) 3458static void alc_update_headset_jack_cb(struct hda_codec *codec, struct hda_jack_tbl *jack)
3394{ 3459{
3395 struct alc_spec *spec = codec->spec; 3460 struct alc_spec *spec = codec->spec;
3396 spec->current_headset_type = ALC_HEADSET_MODE_UNKNOWN; 3461 spec->current_headset_type = ALC_HEADSET_TYPE_UNKNOWN;
3397 snd_hda_gen_hp_automute(codec, jack); 3462 snd_hda_gen_hp_automute(codec, jack);
3398} 3463}
3399 3464
@@ -3560,11 +3625,6 @@ static void alc283_hp_automute_hook(struct hda_codec *codec,
3560 vref); 3625 vref);
3561} 3626}
3562 3627
3563static void alc283_chromebook_caps(struct hda_codec *codec)
3564{
3565 snd_hda_override_wcaps(codec, 0x03, 0);
3566}
3567
3568static void alc283_fixup_chromebook(struct hda_codec *codec, 3628static void alc283_fixup_chromebook(struct hda_codec *codec,
3569 const struct hda_fixup *fix, int action) 3629 const struct hda_fixup *fix, int action)
3570{ 3630{
@@ -3573,9 +3633,26 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
3573 3633
3574 switch (action) { 3634 switch (action) {
3575 case HDA_FIXUP_ACT_PRE_PROBE: 3635 case HDA_FIXUP_ACT_PRE_PROBE:
3576 alc283_chromebook_caps(codec); 3636 snd_hda_override_wcaps(codec, 0x03, 0);
3577 /* Disable AA-loopback as it causes white noise */ 3637 /* Disable AA-loopback as it causes white noise */
3578 spec->gen.mixer_nid = 0; 3638 spec->gen.mixer_nid = 0;
3639 break;
3640 case HDA_FIXUP_ACT_INIT:
3641 /* Enable Line1 input control by verb */
3642 val = alc_read_coef_idx(codec, 0x1a);
3643 alc_write_coef_idx(codec, 0x1a, val | (1 << 4));
3644 break;
3645 }
3646}
3647
3648static void alc283_fixup_sense_combo_jack(struct hda_codec *codec,
3649 const struct hda_fixup *fix, int action)
3650{
3651 struct alc_spec *spec = codec->spec;
3652 int val;
3653
3654 switch (action) {
3655 case HDA_FIXUP_ACT_PRE_PROBE:
3579 spec->gen.hp_automute_hook = alc283_hp_automute_hook; 3656 spec->gen.hp_automute_hook = alc283_hp_automute_hook;
3580 break; 3657 break;
3581 case HDA_FIXUP_ACT_INIT: 3658 case HDA_FIXUP_ACT_INIT:
@@ -3583,9 +3660,6 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
3583 /* Set to manual mode */ 3660 /* Set to manual mode */
3584 val = alc_read_coef_idx(codec, 0x06); 3661 val = alc_read_coef_idx(codec, 0x06);
3585 alc_write_coef_idx(codec, 0x06, val & ~0x000c); 3662 alc_write_coef_idx(codec, 0x06, val & ~0x000c);
3586 /* Enable Line1 input control by verb */
3587 val = alc_read_coef_idx(codec, 0x1a);
3588 alc_write_coef_idx(codec, 0x1a, val | (1 << 4));
3589 break; 3663 break;
3590 } 3664 }
3591} 3665}
@@ -3652,9 +3726,29 @@ static void alc290_fixup_mono_speakers(struct hda_codec *codec,
3652#if IS_ENABLED(CONFIG_THINKPAD_ACPI) 3726#if IS_ENABLED(CONFIG_THINKPAD_ACPI)
3653 3727
3654#include <linux/thinkpad_acpi.h> 3728#include <linux/thinkpad_acpi.h>
3729#include <acpi/acpi.h>
3655 3730
3656static int (*led_set_func)(int, bool); 3731static int (*led_set_func)(int, bool);
3657 3732
3733static acpi_status acpi_check_cb(acpi_handle handle, u32 lvl, void *context,
3734 void **rv)
3735{
3736 bool *found = context;
3737 *found = true;
3738 return AE_OK;
3739}
3740
3741static bool is_thinkpad(struct hda_codec *codec)
3742{
3743 bool found = false;
3744 if (codec->subsystem_id >> 16 != 0x17aa)
3745 return false;
3746 if (ACPI_SUCCESS(acpi_get_devices("LEN0068", acpi_check_cb, &found, NULL)) && found)
3747 return true;
3748 found = false;
3749 return ACPI_SUCCESS(acpi_get_devices("IBM0068", acpi_check_cb, &found, NULL)) && found;
3750}
3751
3658static void update_tpacpi_mute_led(void *private_data, int enabled) 3752static void update_tpacpi_mute_led(void *private_data, int enabled)
3659{ 3753{
3660 if (led_set_func) 3754 if (led_set_func)
@@ -3680,6 +3774,8 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
3680 bool removefunc = false; 3774 bool removefunc = false;
3681 3775
3682 if (action == HDA_FIXUP_ACT_PROBE) { 3776 if (action == HDA_FIXUP_ACT_PROBE) {
3777 if (!is_thinkpad(codec))
3778 return;
3683 if (!led_set_func) 3779 if (!led_set_func)
3684 led_set_func = symbol_request(tpacpi_led_set); 3780 led_set_func = symbol_request(tpacpi_led_set);
3685 if (!led_set_func) { 3781 if (!led_set_func) {
@@ -3753,11 +3849,14 @@ enum {
3753 ALC269_FIXUP_ASUS_X101, 3849 ALC269_FIXUP_ASUS_X101,
3754 ALC271_FIXUP_AMIC_MIC2, 3850 ALC271_FIXUP_AMIC_MIC2,
3755 ALC271_FIXUP_HP_GATE_MIC_JACK, 3851 ALC271_FIXUP_HP_GATE_MIC_JACK,
3852 ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572,
3756 ALC269_FIXUP_ACER_AC700, 3853 ALC269_FIXUP_ACER_AC700,
3757 ALC269_FIXUP_LIMIT_INT_MIC_BOOST, 3854 ALC269_FIXUP_LIMIT_INT_MIC_BOOST,
3855 ALC269VB_FIXUP_ASUS_ZENBOOK,
3758 ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED, 3856 ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED,
3759 ALC269VB_FIXUP_ORDISSIMO_EVE2, 3857 ALC269VB_FIXUP_ORDISSIMO_EVE2,
3760 ALC283_FIXUP_CHROME_BOOK, 3858 ALC283_FIXUP_CHROME_BOOK,
3859 ALC283_FIXUP_SENSE_COMBO_JACK,
3761 ALC282_FIXUP_ASUS_TX300, 3860 ALC282_FIXUP_ASUS_TX300,
3762 ALC283_FIXUP_INT_MIC, 3861 ALC283_FIXUP_INT_MIC,
3763 ALC290_FIXUP_MONO_SPEAKERS, 3862 ALC290_FIXUP_MONO_SPEAKERS,
@@ -3923,6 +4022,8 @@ static const struct hda_fixup alc269_fixups[] = {
3923 [ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT] = { 4022 [ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT] = {
3924 .type = HDA_FIXUP_FUNC, 4023 .type = HDA_FIXUP_FUNC,
3925 .v.func = alc269_fixup_pincfg_no_hp_to_lineout, 4024 .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
4025 .chained = true,
4026 .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
3926 }, 4027 },
3927 [ALC269_FIXUP_DELL1_MIC_NO_PRESENCE] = { 4028 [ALC269_FIXUP_DELL1_MIC_NO_PRESENCE] = {
3928 .type = HDA_FIXUP_PINS, 4029 .type = HDA_FIXUP_PINS,
@@ -4011,6 +4112,12 @@ static const struct hda_fixup alc269_fixups[] = {
4011 .chained = true, 4112 .chained = true,
4012 .chain_id = ALC271_FIXUP_AMIC_MIC2, 4113 .chain_id = ALC271_FIXUP_AMIC_MIC2,
4013 }, 4114 },
4115 [ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572] = {
4116 .type = HDA_FIXUP_FUNC,
4117 .v.func = alc269_fixup_limit_int_mic_boost,
4118 .chained = true,
4119 .chain_id = ALC271_FIXUP_HP_GATE_MIC_JACK,
4120 },
4014 [ALC269_FIXUP_ACER_AC700] = { 4121 [ALC269_FIXUP_ACER_AC700] = {
4015 .type = HDA_FIXUP_PINS, 4122 .type = HDA_FIXUP_PINS,
4016 .v.pins = (const struct hda_pintbl[]) { 4123 .v.pins = (const struct hda_pintbl[]) {
@@ -4027,6 +4134,14 @@ static const struct hda_fixup alc269_fixups[] = {
4027 [ALC269_FIXUP_LIMIT_INT_MIC_BOOST] = { 4134 [ALC269_FIXUP_LIMIT_INT_MIC_BOOST] = {
4028 .type = HDA_FIXUP_FUNC, 4135 .type = HDA_FIXUP_FUNC,
4029 .v.func = alc269_fixup_limit_int_mic_boost, 4136 .v.func = alc269_fixup_limit_int_mic_boost,
4137 .chained = true,
4138 .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
4139 },
4140 [ALC269VB_FIXUP_ASUS_ZENBOOK] = {
4141 .type = HDA_FIXUP_FUNC,
4142 .v.func = alc269_fixup_limit_int_mic_boost,
4143 .chained = true,
4144 .chain_id = ALC269VB_FIXUP_DMIC,
4030 }, 4145 },
4031 [ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED] = { 4146 [ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED] = {
4032 .type = HDA_FIXUP_FUNC, 4147 .type = HDA_FIXUP_FUNC,
@@ -4047,6 +4162,12 @@ static const struct hda_fixup alc269_fixups[] = {
4047 .type = HDA_FIXUP_FUNC, 4162 .type = HDA_FIXUP_FUNC,
4048 .v.func = alc283_fixup_chromebook, 4163 .v.func = alc283_fixup_chromebook,
4049 }, 4164 },
4165 [ALC283_FIXUP_SENSE_COMBO_JACK] = {
4166 .type = HDA_FIXUP_FUNC,
4167 .v.func = alc283_fixup_sense_combo_jack,
4168 .chained = true,
4169 .chain_id = ALC283_FIXUP_CHROME_BOOK,
4170 },
4050 [ALC282_FIXUP_ASUS_TX300] = { 4171 [ALC282_FIXUP_ASUS_TX300] = {
4051 .type = HDA_FIXUP_FUNC, 4172 .type = HDA_FIXUP_FUNC,
4052 .v.func = alc282_fixup_asus_tx300, 4173 .v.func = alc282_fixup_asus_tx300,
@@ -4070,8 +4191,6 @@ static const struct hda_fixup alc269_fixups[] = {
4070 [ALC269_FIXUP_THINKPAD_ACPI] = { 4191 [ALC269_FIXUP_THINKPAD_ACPI] = {
4071 .type = HDA_FIXUP_FUNC, 4192 .type = HDA_FIXUP_FUNC,
4072 .v.func = alc_fixup_thinkpad_acpi, 4193 .v.func = alc_fixup_thinkpad_acpi,
4073 .chained = true,
4074 .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
4075 }, 4194 },
4076 [ALC255_FIXUP_DELL1_MIC_NO_PRESENCE] = { 4195 [ALC255_FIXUP_DELL1_MIC_NO_PRESENCE] = {
4077 .type = HDA_FIXUP_PINS, 4196 .type = HDA_FIXUP_PINS,
@@ -4096,6 +4215,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4096 SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK), 4215 SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
4097 SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK), 4216 SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
4098 SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC), 4217 SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
4218 SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
4099 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), 4219 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
4100 SND_PCI_QUIRK(0x1028, 0x05bd, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 4220 SND_PCI_QUIRK(0x1028, 0x05bd, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
4101 SND_PCI_QUIRK(0x1028, 0x05be, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 4221 SND_PCI_QUIRK(0x1028, 0x05be, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
@@ -4128,8 +4248,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4128 SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 4248 SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4129 SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 4249 SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4130 SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 4250 SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4251 SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4131 SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS), 4252 SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS),
4132 SND_PCI_QUIRK(0x1028, 0x061f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4253 SND_PCI_QUIRK(0x1028, 0x061f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
4254 SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS),
4133 SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4255 SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
4134 SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 4256 SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
4135 SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 4257 SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
@@ -4138,13 +4260,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4138 SND_PCI_QUIRK(0x103c, 0x1973, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4260 SND_PCI_QUIRK(0x103c, 0x1973, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4139 SND_PCI_QUIRK(0x103c, 0x1983, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4261 SND_PCI_QUIRK(0x103c, 0x1983, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4140 SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED), 4262 SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
4141 SND_PCI_QUIRK(0x103c, 0x21ed, "HP Falco Chromebook", ALC283_FIXUP_CHROME_BOOK),
4142 SND_PCI_QUIRK_VENDOR(0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED), 4263 SND_PCI_QUIRK_VENDOR(0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED),
4143 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), 4264 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
4144 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 4265 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
4145 SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 4266 SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
4146 SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_DMIC), 4267 SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
4147 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_DMIC), 4268 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK),
4148 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), 4269 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
4149 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), 4270 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
4150 SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC), 4271 SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
@@ -4173,7 +4294,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4173 SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK), 4294 SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
4174 SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 4295 SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
4175 SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 4296 SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
4176 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_THINKPAD_ACPI), 4297 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
4177 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 4298 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
4178 SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 4299 SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
4179 SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), 4300 SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
@@ -4181,6 +4302,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4181 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 4302 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
4182 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), 4303 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
4183 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), 4304 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
4305 SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", ALC269_FIXUP_THINKPAD_ACPI),
4184 SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */ 4306 SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
4185 4307
4186#if 0 4308#if 0
@@ -4245,6 +4367,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
4245 {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"}, 4367 {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
4246 {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"}, 4368 {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
4247 {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"}, 4369 {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
4370 {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-chrome"},
4371 {.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
4248 {} 4372 {}
4249}; 4373};
4250 4374
@@ -4420,6 +4544,7 @@ enum {
4420 ALC861_FIXUP_AMP_VREF_0F, 4544 ALC861_FIXUP_AMP_VREF_0F,
4421 ALC861_FIXUP_NO_JACK_DETECT, 4545 ALC861_FIXUP_NO_JACK_DETECT,
4422 ALC861_FIXUP_ASUS_A6RP, 4546 ALC861_FIXUP_ASUS_A6RP,
4547 ALC660_FIXUP_ASUS_W7J,
4423}; 4548};
4424 4549
4425/* On some laptops, VREF of pin 0x0f is abused for controlling the main amp */ 4550/* On some laptops, VREF of pin 0x0f is abused for controlling the main amp */
@@ -4469,10 +4594,22 @@ static const struct hda_fixup alc861_fixups[] = {
4469 .v.func = alc861_fixup_asus_amp_vref_0f, 4594 .v.func = alc861_fixup_asus_amp_vref_0f,
4470 .chained = true, 4595 .chained = true,
4471 .chain_id = ALC861_FIXUP_NO_JACK_DETECT, 4596 .chain_id = ALC861_FIXUP_NO_JACK_DETECT,
4597 },
4598 [ALC660_FIXUP_ASUS_W7J] = {
4599 .type = HDA_FIXUP_VERBS,
4600 .v.verbs = (const struct hda_verb[]) {
4601 /* ASUS W7J needs a magic pin setup on unused NID 0x10
4602 * for enabling outputs
4603 */
4604 {0x10, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24},
4605 { }
4606 },
4472 } 4607 }
4473}; 4608};
4474 4609
4475static const struct snd_pci_quirk alc861_fixup_tbl[] = { 4610static const struct snd_pci_quirk alc861_fixup_tbl[] = {
4611 SND_PCI_QUIRK(0x1043, 0x1253, "ASUS W7J", ALC660_FIXUP_ASUS_W7J),
4612 SND_PCI_QUIRK(0x1043, 0x1263, "ASUS Z35HL", ALC660_FIXUP_ASUS_W7J),
4476 SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP), 4613 SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP),
4477 SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", ALC861_FIXUP_AMP_VREF_0F), 4614 SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", ALC861_FIXUP_AMP_VREF_0F),
4478 SND_PCI_QUIRK(0x1462, 0x7254, "HP DX2200", ALC861_FIXUP_NO_JACK_DETECT), 4615 SND_PCI_QUIRK(0x1462, 0x7254, "HP DX2200", ALC861_FIXUP_NO_JACK_DETECT),
@@ -4668,7 +4805,7 @@ static const struct snd_pcm_chmap_elem asus_pcm_2_1_chmaps[] = {
4668}; 4805};
4669 4806
4670/* override the 2.1 chmap */ 4807/* override the 2.1 chmap */
4671static void alc662_fixup_bass_chmap(struct hda_codec *codec, 4808static void alc_fixup_bass_chmap(struct hda_codec *codec,
4672 const struct hda_fixup *fix, int action) 4809 const struct hda_fixup *fix, int action)
4673{ 4810{
4674 if (action == HDA_FIXUP_ACT_BUILD) { 4811 if (action == HDA_FIXUP_ACT_BUILD) {
@@ -4698,6 +4835,8 @@ enum {
4698 ALC668_FIXUP_DELL_MIC_NO_PRESENCE, 4835 ALC668_FIXUP_DELL_MIC_NO_PRESENCE,
4699 ALC668_FIXUP_HEADSET_MODE, 4836 ALC668_FIXUP_HEADSET_MODE,
4700 ALC662_FIXUP_BASS_CHMAP, 4837 ALC662_FIXUP_BASS_CHMAP,
4838 ALC662_FIXUP_BASS_1A,
4839 ALC662_FIXUP_BASS_1A_CHMAP,
4701}; 4840};
4702 4841
4703static const struct hda_fixup alc662_fixups[] = { 4842static const struct hda_fixup alc662_fixups[] = {
@@ -4874,10 +5013,23 @@ static const struct hda_fixup alc662_fixups[] = {
4874 }, 5013 },
4875 [ALC662_FIXUP_BASS_CHMAP] = { 5014 [ALC662_FIXUP_BASS_CHMAP] = {
4876 .type = HDA_FIXUP_FUNC, 5015 .type = HDA_FIXUP_FUNC,
4877 .v.func = alc662_fixup_bass_chmap, 5016 .v.func = alc_fixup_bass_chmap,
4878 .chained = true, 5017 .chained = true,
4879 .chain_id = ALC662_FIXUP_ASUS_MODE4 5018 .chain_id = ALC662_FIXUP_ASUS_MODE4
4880 }, 5019 },
5020 [ALC662_FIXUP_BASS_1A] = {
5021 .type = HDA_FIXUP_PINS,
5022 .v.pins = (const struct hda_pintbl[]) {
5023 {0x1a, 0x80106111}, /* bass speaker */
5024 {}
5025 },
5026 },
5027 [ALC662_FIXUP_BASS_1A_CHMAP] = {
5028 .type = HDA_FIXUP_FUNC,
5029 .v.func = alc_fixup_bass_chmap,
5030 .chained = true,
5031 .chain_id = ALC662_FIXUP_BASS_1A,
5032 },
4881}; 5033};
4882 5034
4883static const struct snd_pci_quirk alc662_fixup_tbl[] = { 5035static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -4890,8 +5042,13 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
4890 SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), 5042 SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
4891 SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 5043 SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
4892 SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 5044 SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5045 SND_PCI_QUIRK(0x1028, 0x0623, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5046 SND_PCI_QUIRK(0x1028, 0x0624, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5047 SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
4893 SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 5048 SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5049 SND_PCI_QUIRK(0x1028, 0x0628, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
4894 SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), 5050 SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
5051 SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP),
4895 SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP), 5052 SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP),
4896 SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_CHMAP), 5053 SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_CHMAP),
4897 SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT), 5054 SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
@@ -5054,6 +5211,7 @@ static int patch_alc662(struct hda_codec *codec)
5054 case 0x10ec0272: 5211 case 0x10ec0272:
5055 case 0x10ec0663: 5212 case 0x10ec0663:
5056 case 0x10ec0665: 5213 case 0x10ec0665:
5214 case 0x10ec0668:
5057 set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT); 5215 set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
5058 break; 5216 break;
5059 case 0x10ec0273: 5217 case 0x10ec0273:
@@ -5111,6 +5269,7 @@ static int patch_alc680(struct hda_codec *codec)
5111 */ 5269 */
5112static const struct hda_codec_preset snd_hda_preset_realtek[] = { 5270static const struct hda_codec_preset snd_hda_preset_realtek[] = {
5113 { .id = 0x10ec0221, .name = "ALC221", .patch = patch_alc269 }, 5271 { .id = 0x10ec0221, .name = "ALC221", .patch = patch_alc269 },
5272 { .id = 0x10ec0231, .name = "ALC231", .patch = patch_alc269 },
5114 { .id = 0x10ec0233, .name = "ALC233", .patch = patch_alc269 }, 5273 { .id = 0x10ec0233, .name = "ALC233", .patch = patch_alc269 },
5115 { .id = 0x10ec0255, .name = "ALC255", .patch = patch_alc269 }, 5274 { .id = 0x10ec0255, .name = "ALC255", .patch = patch_alc269 },
5116 { .id = 0x10ec0260, .name = "ALC260", .patch = patch_alc260 }, 5275 { .id = 0x10ec0260, .name = "ALC260", .patch = patch_alc260 },
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index d2cc0041d9d3..088a5afbd1b9 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -2094,7 +2094,8 @@ static void stac92hd83xxx_fixup_hp_mic_led(struct hda_codec *codec,
2094 2094
2095 if (action == HDA_FIXUP_ACT_PRE_PROBE) { 2095 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
2096 spec->mic_mute_led_gpio = 0x08; /* GPIO3 */ 2096 spec->mic_mute_led_gpio = 0x08; /* GPIO3 */
2097 codec->bus->avoid_link_reset = 1; 2097 /* resetting controller clears GPIO, so we need to keep on */
2098 codec->bus->power_keep_link_on = 1;
2098 } 2099 }
2099} 2100}
2100 2101
diff --git a/sound/soc/atmel/sam9x5_wm8731.c b/sound/soc/atmel/sam9x5_wm8731.c
index 992ae38d5a15..1b372283bd01 100644
--- a/sound/soc/atmel/sam9x5_wm8731.c
+++ b/sound/soc/atmel/sam9x5_wm8731.c
@@ -97,6 +97,8 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev)
97 goto out; 97 goto out;
98 } 98 }
99 99
100 snd_soc_card_set_drvdata(card, priv);
101
100 card->dev = &pdev->dev; 102 card->dev = &pdev->dev;
101 card->owner = THIS_MODULE; 103 card->owner = THIS_MODULE;
102 card->dai_link = dai; 104 card->dai_link = dai;
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index 21ae8d4fdbfb..1ad92cbf0b24 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -126,8 +126,6 @@ struct ab8500_codec_drvdata_dbg {
126 126
127/* Private data for AB8500 device-driver */ 127/* Private data for AB8500 device-driver */
128struct ab8500_codec_drvdata { 128struct ab8500_codec_drvdata {
129 struct regmap *regmap;
130
131 /* Sidetone */ 129 /* Sidetone */
132 long *sid_fir_values; 130 long *sid_fir_values;
133 enum sid_state sid_status; 131 enum sid_state sid_status;
@@ -168,34 +166,48 @@ static inline const char *amic_type_str(enum amic_type type)
168 */ 166 */
169 167
170/* Read a register from the audio-bank of AB8500 */ 168/* Read a register from the audio-bank of AB8500 */
171static int ab8500_codec_read_reg(void *context, unsigned int reg, 169static unsigned int ab8500_codec_read_reg(struct snd_soc_codec *codec,
172 unsigned int *value) 170 unsigned int reg)
173{ 171{
174 struct device *dev = context;
175 int status; 172 int status;
173 unsigned int value = 0;
176 174
177 u8 value8; 175 u8 value8;
178 status = abx500_get_register_interruptible(dev, AB8500_AUDIO, 176 status = abx500_get_register_interruptible(codec->dev, AB8500_AUDIO,
179 reg, &value8); 177 reg, &value8);
180 *value = (unsigned int)value8; 178 if (status < 0) {
179 dev_err(codec->dev,
180 "%s: ERROR: Register (0x%02x:0x%02x) read failed (%d).\n",
181 __func__, (u8)AB8500_AUDIO, (u8)reg, status);
182 } else {
183 dev_dbg(codec->dev,
184 "%s: Read 0x%02x from register 0x%02x:0x%02x\n",
185 __func__, value8, (u8)AB8500_AUDIO, (u8)reg);
186 value = (unsigned int)value8;
187 }
181 188
182 return status; 189 return value;
183} 190}
184 191
185/* Write to a register in the audio-bank of AB8500 */ 192/* Write to a register in the audio-bank of AB8500 */
186static int ab8500_codec_write_reg(void *context, unsigned int reg, 193static int ab8500_codec_write_reg(struct snd_soc_codec *codec,
187 unsigned int value) 194 unsigned int reg, unsigned int value)
188{ 195{
189 struct device *dev = context; 196 int status;
190 197
191 return abx500_set_register_interruptible(dev, AB8500_AUDIO, 198 status = abx500_set_register_interruptible(codec->dev, AB8500_AUDIO,
192 reg, value); 199 reg, value);
193} 200 if (status < 0)
201 dev_err(codec->dev,
202 "%s: ERROR: Register (%02x:%02x) write failed (%d).\n",
203 __func__, (u8)AB8500_AUDIO, (u8)reg, status);
204 else
205 dev_dbg(codec->dev,
206 "%s: Wrote 0x%02x into register %02x:%02x\n",
207 __func__, (u8)value, (u8)AB8500_AUDIO, (u8)reg);
194 208
195static const struct regmap_config ab8500_codec_regmap = { 209 return status;
196 .reg_read = ab8500_codec_read_reg, 210}
197 .reg_write = ab8500_codec_write_reg,
198};
199 211
200/* 212/*
201 * Controls - DAPM 213 * Controls - DAPM
@@ -2473,13 +2485,9 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec)
2473 2485
2474 dev_dbg(dev, "%s: Enter.\n", __func__); 2486 dev_dbg(dev, "%s: Enter.\n", __func__);
2475 2487
2476 snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
2477
2478 /* Setup AB8500 according to board-settings */ 2488 /* Setup AB8500 according to board-settings */
2479 pdata = dev_get_platdata(dev->parent); 2489 pdata = dev_get_platdata(dev->parent);
2480 2490
2481 codec->control_data = drvdata->regmap;
2482
2483 if (np) { 2491 if (np) {
2484 if (!pdata) 2492 if (!pdata)
2485 pdata = devm_kzalloc(dev, 2493 pdata = devm_kzalloc(dev,
@@ -2557,6 +2565,9 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec)
2557 2565
2558static struct snd_soc_codec_driver ab8500_codec_driver = { 2566static struct snd_soc_codec_driver ab8500_codec_driver = {
2559 .probe = ab8500_codec_probe, 2567 .probe = ab8500_codec_probe,
2568 .read = ab8500_codec_read_reg,
2569 .write = ab8500_codec_write_reg,
2570 .reg_word_size = sizeof(u8),
2560 .controls = ab8500_ctrls, 2571 .controls = ab8500_ctrls,
2561 .num_controls = ARRAY_SIZE(ab8500_ctrls), 2572 .num_controls = ARRAY_SIZE(ab8500_ctrls),
2562 .dapm_widgets = ab8500_dapm_widgets, 2573 .dapm_widgets = ab8500_dapm_widgets,
@@ -2581,15 +2592,6 @@ static int ab8500_codec_driver_probe(struct platform_device *pdev)
2581 drvdata->anc_status = ANC_UNCONFIGURED; 2592 drvdata->anc_status = ANC_UNCONFIGURED;
2582 dev_set_drvdata(&pdev->dev, drvdata); 2593 dev_set_drvdata(&pdev->dev, drvdata);
2583 2594
2584 drvdata->regmap = devm_regmap_init(&pdev->dev, NULL, &pdev->dev,
2585 &ab8500_codec_regmap);
2586 if (IS_ERR(drvdata->regmap)) {
2587 status = PTR_ERR(drvdata->regmap);
2588 dev_err(&pdev->dev, "%s: Failed to allocate regmap: %d\n",
2589 __func__, status);
2590 return status;
2591 }
2592
2593 dev_dbg(&pdev->dev, "%s: Register codec.\n", __func__); 2595 dev_dbg(&pdev->dev, "%s: Register codec.\n", __func__);
2594 status = snd_soc_register_codec(&pdev->dev, &ab8500_codec_driver, 2596 status = snd_soc_register_codec(&pdev->dev, &ab8500_codec_driver,
2595 ab8500_codec_dai, 2597 ab8500_codec_dai,
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 6f05b17d1965..fea991031be1 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -1529,6 +1529,8 @@ static void arizona_enable_fll(struct arizona_fll *fll,
1529 try_wait_for_completion(&fll->ok); 1529 try_wait_for_completion(&fll->ok);
1530 1530
1531 regmap_update_bits(arizona->regmap, fll->base + 1, 1531 regmap_update_bits(arizona->regmap, fll->base + 1,
1532 ARIZONA_FLL1_FREERUN, 0);
1533 regmap_update_bits(arizona->regmap, fll->base + 1,
1532 ARIZONA_FLL1_ENA, ARIZONA_FLL1_ENA); 1534 ARIZONA_FLL1_ENA, ARIZONA_FLL1_ENA);
1533 if (use_sync) 1535 if (use_sync)
1534 regmap_update_bits(arizona->regmap, fll->base + 0x11, 1536 regmap_update_bits(arizona->regmap, fll->base + 0x11,
@@ -1546,6 +1548,8 @@ static void arizona_disable_fll(struct arizona_fll *fll)
1546 struct arizona *arizona = fll->arizona; 1548 struct arizona *arizona = fll->arizona;
1547 bool change; 1549 bool change;
1548 1550
1551 regmap_update_bits(arizona->regmap, fll->base + 1,
1552 ARIZONA_FLL1_FREERUN, ARIZONA_FLL1_FREERUN);
1549 regmap_update_bits_check(arizona->regmap, fll->base + 1, 1553 regmap_update_bits_check(arizona->regmap, fll->base + 1,
1550 ARIZONA_FLL1_ENA, 0, &change); 1554 ARIZONA_FLL1_ENA, 0, &change);
1551 regmap_update_bits(arizona->regmap, fll->base + 0x11, 1555 regmap_update_bits(arizona->regmap, fll->base + 0x11,
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index f2d1094424b9..99b359e19d35 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -37,6 +37,47 @@ struct wm5110_priv {
37 struct arizona_fll fll[2]; 37 struct arizona_fll fll[2];
38}; 38};
39 39
40static const struct reg_default wm5110_sysclk_revd_patch[] = {
41 { 0x3093, 0x1001 },
42 { 0x30E3, 0x1301 },
43 { 0x3133, 0x1201 },
44 { 0x3183, 0x1501 },
45 { 0x31D3, 0x1401 },
46};
47
48static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w,
49 struct snd_kcontrol *kcontrol, int event)
50{
51 struct snd_soc_codec *codec = w->codec;
52 struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
53 struct regmap *regmap = codec->control_data;
54 const struct reg_default *patch = NULL;
55 int i, patch_size;
56
57 switch (arizona->rev) {
58 case 3:
59 patch = wm5110_sysclk_revd_patch;
60 patch_size = ARRAY_SIZE(wm5110_sysclk_revd_patch);
61 break;
62 default:
63 return 0;
64 }
65
66 switch (event) {
67 case SND_SOC_DAPM_POST_PMU:
68 if (patch)
69 for (i = 0; i < patch_size; i++)
70 regmap_write(regmap, patch[i].reg,
71 patch[i].def);
72 break;
73
74 default:
75 break;
76 }
77
78 return 0;
79}
80
40static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0); 81static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
41static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0); 82static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
42static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0); 83static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
@@ -207,19 +248,6 @@ ARIZONA_MIXER_CONTROLS("SPKDAT1R", ARIZONA_OUT5RMIX_INPUT_1_SOURCE),
207ARIZONA_MIXER_CONTROLS("SPKDAT2L", ARIZONA_OUT6LMIX_INPUT_1_SOURCE), 248ARIZONA_MIXER_CONTROLS("SPKDAT2L", ARIZONA_OUT6LMIX_INPUT_1_SOURCE),
208ARIZONA_MIXER_CONTROLS("SPKDAT2R", ARIZONA_OUT6RMIX_INPUT_1_SOURCE), 249ARIZONA_MIXER_CONTROLS("SPKDAT2R", ARIZONA_OUT6RMIX_INPUT_1_SOURCE),
209 250
210SOC_SINGLE("HPOUT1 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_1L,
211 ARIZONA_OUT1_OSR_SHIFT, 1, 0),
212SOC_SINGLE("HPOUT2 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_2L,
213 ARIZONA_OUT2_OSR_SHIFT, 1, 0),
214SOC_SINGLE("HPOUT3 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_3L,
215 ARIZONA_OUT3_OSR_SHIFT, 1, 0),
216SOC_SINGLE("Speaker High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_4L,
217 ARIZONA_OUT4_OSR_SHIFT, 1, 0),
218SOC_SINGLE("SPKDAT1 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_5L,
219 ARIZONA_OUT5_OSR_SHIFT, 1, 0),
220SOC_SINGLE("SPKDAT2 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_6L,
221 ARIZONA_OUT6_OSR_SHIFT, 1, 0),
222
223SOC_DOUBLE_R("HPOUT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_1L, 251SOC_DOUBLE_R("HPOUT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_1L,
224 ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_MUTE_SHIFT, 1, 1), 252 ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_MUTE_SHIFT, 1, 1),
225SOC_DOUBLE_R("HPOUT2 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_2L, 253SOC_DOUBLE_R("HPOUT2 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_2L,
@@ -252,18 +280,6 @@ SOC_DOUBLE_R_TLV("SPKDAT2 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_6L,
252 ARIZONA_DAC_DIGITAL_VOLUME_6R, ARIZONA_OUT6L_VOL_SHIFT, 280 ARIZONA_DAC_DIGITAL_VOLUME_6R, ARIZONA_OUT6L_VOL_SHIFT,
253 0xbf, 0, digital_tlv), 281 0xbf, 0, digital_tlv),
254 282
255SOC_DOUBLE_R_RANGE_TLV("HPOUT1 Volume", ARIZONA_OUTPUT_PATH_CONFIG_1L,
256 ARIZONA_OUTPUT_PATH_CONFIG_1R,
257 ARIZONA_OUT1L_PGA_VOL_SHIFT,
258 0x34, 0x40, 0, ana_tlv),
259SOC_DOUBLE_R_RANGE_TLV("HPOUT2 Volume", ARIZONA_OUTPUT_PATH_CONFIG_2L,
260 ARIZONA_OUTPUT_PATH_CONFIG_2R,
261 ARIZONA_OUT2L_PGA_VOL_SHIFT,
262 0x34, 0x40, 0, ana_tlv),
263SOC_DOUBLE_R_RANGE_TLV("HPOUT3 Volume", ARIZONA_OUTPUT_PATH_CONFIG_3L,
264 ARIZONA_OUTPUT_PATH_CONFIG_3R,
265 ARIZONA_OUT3L_PGA_VOL_SHIFT, 0x34, 0x40, 0, ana_tlv),
266
267SOC_DOUBLE("SPKDAT1 Switch", ARIZONA_PDM_SPK1_CTRL_1, ARIZONA_SPK1L_MUTE_SHIFT, 283SOC_DOUBLE("SPKDAT1 Switch", ARIZONA_PDM_SPK1_CTRL_1, ARIZONA_SPK1L_MUTE_SHIFT,
268 ARIZONA_SPK1R_MUTE_SHIFT, 1, 1), 284 ARIZONA_SPK1R_MUTE_SHIFT, 1, 1),
269SOC_DOUBLE("SPKDAT2 Switch", ARIZONA_PDM_SPK2_CTRL_1, ARIZONA_SPK2L_MUTE_SHIFT, 285SOC_DOUBLE("SPKDAT2 Switch", ARIZONA_PDM_SPK2_CTRL_1, ARIZONA_SPK2L_MUTE_SHIFT,
@@ -400,7 +416,7 @@ static const struct snd_kcontrol_new wm5110_aec_loopback_mux =
400 416
401static const struct snd_soc_dapm_widget wm5110_dapm_widgets[] = { 417static const struct snd_soc_dapm_widget wm5110_dapm_widgets[] = {
402SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT, 418SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT,
403 0, NULL, 0), 419 0, wm5110_sysclk_ev, SND_SOC_DAPM_POST_PMU),
404SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1, 420SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1,
405 ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0), 421 ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0),
406SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK, 422SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK,
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 456bb8c6d759..bc7472c968e3 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -447,10 +447,10 @@ static int wm8731_set_dai_fmt(struct snd_soc_dai *codec_dai,
447 iface |= 0x0001; 447 iface |= 0x0001;
448 break; 448 break;
449 case SND_SOC_DAIFMT_DSP_A: 449 case SND_SOC_DAIFMT_DSP_A:
450 iface |= 0x0003; 450 iface |= 0x0013;
451 break; 451 break;
452 case SND_SOC_DAIFMT_DSP_B: 452 case SND_SOC_DAIFMT_DSP_B:
453 iface |= 0x0013; 453 iface |= 0x0003;
454 break; 454 break;
455 default: 455 default:
456 return -EINVAL; 456 return -EINVAL;
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 253c88bb7a4c..4f05fb88bddf 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -1259,6 +1259,8 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
1259 1259
1260 /* disable POBCTRL, SOFT_ST and BUFDCOPEN */ 1260 /* disable POBCTRL, SOFT_ST and BUFDCOPEN */
1261 snd_soc_write(codec, WM8990_ANTIPOP2, 0x0); 1261 snd_soc_write(codec, WM8990_ANTIPOP2, 0x0);
1262
1263 codec->cache_sync = 1;
1262 break; 1264 break;
1263 } 1265 }
1264 1266
diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c
index fa64cd85204f..fb5d107f5603 100644
--- a/sound/soc/davinci/davinci-pcm.c
+++ b/sound/soc/davinci/davinci-pcm.c
@@ -238,7 +238,7 @@ static void davinci_pcm_dma_irq(unsigned link, u16 ch_status, void *data)
238 print_buf_info(prtd->ram_channel, "i ram_channel"); 238 print_buf_info(prtd->ram_channel, "i ram_channel");
239 pr_debug("davinci_pcm: link=%d, status=0x%x\n", link, ch_status); 239 pr_debug("davinci_pcm: link=%d, status=0x%x\n", link, ch_status);
240 240
241 if (unlikely(ch_status != DMA_COMPLETE)) 241 if (unlikely(ch_status != EDMA_DMA_COMPLETE))
242 return; 242 return;
243 243
244 if (snd_pcm_running(substream)) { 244 if (snd_pcm_running(substream)) {
diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c
index eb4373840bb6..3665f612819d 100644
--- a/sound/soc/fsl/pcm030-audio-fabric.c
+++ b/sound/soc/fsl/pcm030-audio-fabric.c
@@ -69,7 +69,6 @@ static int pcm030_fabric_probe(struct platform_device *op)
69 return -ENOMEM; 69 return -ENOMEM;
70 70
71 card->dev = &op->dev; 71 card->dev = &op->dev;
72 platform_set_drvdata(op, pdata);
73 72
74 pdata->card = card; 73 pdata->card = card;
75 74
@@ -98,6 +97,8 @@ static int pcm030_fabric_probe(struct platform_device *op)
98 if (ret) 97 if (ret)
99 dev_err(&op->dev, "snd_soc_register_card() failed: %d\n", ret); 98 dev_err(&op->dev, "snd_soc_register_card() failed: %d\n", ret);
100 99
100 platform_set_drvdata(op, pdata);
101
101 return ret; 102 return ret;
102} 103}
103 104
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index d34d91743e3f..0b18f654b413 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -33,6 +33,10 @@
33 SNDRV_PCM_FMTBIT_S24_LE | \ 33 SNDRV_PCM_FMTBIT_S24_LE | \
34 SNDRV_PCM_FMTBIT_S32_LE) 34 SNDRV_PCM_FMTBIT_S32_LE)
35 35
36#define KIRKWOOD_SPDIF_FORMATS \
37 (SNDRV_PCM_FMTBIT_S16_LE | \
38 SNDRV_PCM_FMTBIT_S24_LE)
39
36static int kirkwood_i2s_set_fmt(struct snd_soc_dai *cpu_dai, 40static int kirkwood_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
37 unsigned int fmt) 41 unsigned int fmt)
38{ 42{
@@ -244,15 +248,15 @@ static int kirkwood_i2s_play_trigger(struct snd_pcm_substream *substream,
244 ctl); 248 ctl);
245 } 249 }
246 250
247 if (dai->id == 0)
248 ctl &= ~KIRKWOOD_PLAYCTL_SPDIF_EN; /* i2s */
249 else
250 ctl &= ~KIRKWOOD_PLAYCTL_I2S_EN; /* spdif */
251
252 switch (cmd) { 251 switch (cmd) {
253 case SNDRV_PCM_TRIGGER_START: 252 case SNDRV_PCM_TRIGGER_START:
254 /* configure */ 253 /* configure */
255 ctl = priv->ctl_play; 254 ctl = priv->ctl_play;
255 if (dai->id == 0)
256 ctl &= ~KIRKWOOD_PLAYCTL_SPDIF_EN; /* i2s */
257 else
258 ctl &= ~KIRKWOOD_PLAYCTL_I2S_EN; /* spdif */
259
256 value = ctl & ~KIRKWOOD_PLAYCTL_ENABLE_MASK; 260 value = ctl & ~KIRKWOOD_PLAYCTL_ENABLE_MASK;
257 writel(value, priv->io + KIRKWOOD_PLAYCTL); 261 writel(value, priv->io + KIRKWOOD_PLAYCTL);
258 262
@@ -449,14 +453,14 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai[2] = {
449 .channels_max = 2, 453 .channels_max = 2,
450 .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | 454 .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
451 SNDRV_PCM_RATE_96000, 455 SNDRV_PCM_RATE_96000,
452 .formats = KIRKWOOD_I2S_FORMATS, 456 .formats = KIRKWOOD_SPDIF_FORMATS,
453 }, 457 },
454 .capture = { 458 .capture = {
455 .channels_min = 1, 459 .channels_min = 1,
456 .channels_max = 2, 460 .channels_max = 2,
457 .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | 461 .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
458 SNDRV_PCM_RATE_96000, 462 SNDRV_PCM_RATE_96000,
459 .formats = KIRKWOOD_I2S_FORMATS, 463 .formats = KIRKWOOD_SPDIF_FORMATS,
460 }, 464 },
461 .ops = &kirkwood_i2s_dai_ops, 465 .ops = &kirkwood_i2s_dai_ops,
462 }, 466 },
@@ -493,7 +497,7 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = {
493 .rates = SNDRV_PCM_RATE_8000_192000 | 497 .rates = SNDRV_PCM_RATE_8000_192000 |
494 SNDRV_PCM_RATE_CONTINUOUS | 498 SNDRV_PCM_RATE_CONTINUOUS |
495 SNDRV_PCM_RATE_KNOT, 499 SNDRV_PCM_RATE_KNOT,
496 .formats = KIRKWOOD_I2S_FORMATS, 500 .formats = KIRKWOOD_SPDIF_FORMATS,
497 }, 501 },
498 .capture = { 502 .capture = {
499 .channels_min = 1, 503 .channels_min = 1,
@@ -501,7 +505,7 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = {
501 .rates = SNDRV_PCM_RATE_8000_192000 | 505 .rates = SNDRV_PCM_RATE_8000_192000 |
502 SNDRV_PCM_RATE_CONTINUOUS | 506 SNDRV_PCM_RATE_CONTINUOUS |
503 SNDRV_PCM_RATE_KNOT, 507 SNDRV_PCM_RATE_KNOT,
504 .formats = KIRKWOOD_I2S_FORMATS, 508 .formats = KIRKWOOD_SPDIF_FORMATS,
505 }, 509 },
506 .ops = &kirkwood_i2s_dai_ops, 510 .ops = &kirkwood_i2s_dai_ops,
507 }, 511 },
diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c
index 6d216cb6c19b..3fde9e402710 100644
--- a/sound/soc/omap/n810.c
+++ b/sound/soc/omap/n810.c
@@ -100,12 +100,12 @@ static int n810_startup(struct snd_pcm_substream *substream)
100 SNDRV_PCM_HW_PARAM_CHANNELS, 2, 2); 100 SNDRV_PCM_HW_PARAM_CHANNELS, 2, 2);
101 101
102 n810_ext_control(&codec->dapm); 102 n810_ext_control(&codec->dapm);
103 return clk_enable(sys_clkout2); 103 return clk_prepare_enable(sys_clkout2);
104} 104}
105 105
106static void n810_shutdown(struct snd_pcm_substream *substream) 106static void n810_shutdown(struct snd_pcm_substream *substream)
107{ 107{
108 clk_disable(sys_clkout2); 108 clk_disable_unprepare(sys_clkout2);
109} 109}
110 110
111static int n810_hw_params(struct snd_pcm_substream *substream, 111static int n810_hw_params(struct snd_pcm_substream *substream,
diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig
index 14011d90d70a..ff60e11ecb56 100644
--- a/sound/soc/sh/Kconfig
+++ b/sound/soc/sh/Kconfig
@@ -37,6 +37,7 @@ config SND_SOC_SH4_SIU
37config SND_SOC_RCAR 37config SND_SOC_RCAR
38 tristate "R-Car series SRU/SCU/SSIU/SSI support" 38 tristate "R-Car series SRU/SCU/SSIU/SSI support"
39 select SND_SIMPLE_CARD 39 select SND_SIMPLE_CARD
40 select REGMAP
40 help 41 help
41 This option enables R-Car SUR/SCU/SSIU/SSI sound support 42 This option enables R-Car SUR/SCU/SSIU/SSI sound support
42 43
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 78c35b44fc04..b3653d37f75f 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -200,9 +200,8 @@ static void rsnd_dma_do_work(struct work_struct *work)
200 return; 200 return;
201 } 201 }
202 202
203 dma_async_issue_pending(dma->chan);
203 } 204 }
204
205 dma_async_issue_pending(dma->chan);
206} 205}
207 206
208int rsnd_dma_available(struct rsnd_dma *dma) 207int rsnd_dma_available(struct rsnd_dma *dma)
@@ -288,15 +287,13 @@ int rsnd_dai_connect(struct rsnd_dai *rdai,
288 struct rsnd_mod *mod, 287 struct rsnd_mod *mod,
289 struct rsnd_dai_stream *io) 288 struct rsnd_dai_stream *io)
290{ 289{
291 struct rsnd_priv *priv = rsnd_mod_to_priv(mod); 290 if (!mod)
292 struct device *dev = rsnd_priv_to_dev(priv);
293
294 if (!mod) {
295 dev_err(dev, "NULL mod\n");
296 return -EIO; 291 return -EIO;
297 }
298 292
299 if (!list_empty(&mod->list)) { 293 if (!list_empty(&mod->list)) {
294 struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
295 struct device *dev = rsnd_priv_to_dev(priv);
296
300 dev_err(dev, "%s%d is not empty\n", 297 dev_err(dev, "%s%d is not empty\n",
301 rsnd_mod_name(mod), 298 rsnd_mod_name(mod),
302 rsnd_mod_id(mod)); 299 rsnd_mod_id(mod));
diff --git a/sound/soc/sh/rcar/scu.c b/sound/soc/sh/rcar/scu.c
index f4453e33a847..fa8fa15860b9 100644
--- a/sound/soc/sh/rcar/scu.c
+++ b/sound/soc/sh/rcar/scu.c
@@ -68,7 +68,7 @@ static int rsnd_scu_set_route(struct rsnd_priv *priv,
68 return 0; 68 return 0;
69 69
70 id = rsnd_mod_id(mod); 70 id = rsnd_mod_id(mod);
71 if (id < 0 || id > ARRAY_SIZE(routes)) 71 if (id < 0 || id >= ARRAY_SIZE(routes))
72 return -EIO; 72 return -EIO;
73 73
74 /* 74 /*
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 4e53d87e881d..a66783e13a9c 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3212,11 +3212,11 @@ int snd_soc_bytes_get(struct snd_kcontrol *kcontrol,
3212 break; 3212 break;
3213 case 2: 3213 case 2:
3214 ((u16 *)(&ucontrol->value.bytes.data))[0] 3214 ((u16 *)(&ucontrol->value.bytes.data))[0]
3215 &= ~params->mask; 3215 &= cpu_to_be16(~params->mask);
3216 break; 3216 break;
3217 case 4: 3217 case 4:
3218 ((u32 *)(&ucontrol->value.bytes.data))[0] 3218 ((u32 *)(&ucontrol->value.bytes.data))[0]
3219 &= ~params->mask; 3219 &= cpu_to_be32(~params->mask);
3220 break; 3220 break;
3221 default: 3221 default:
3222 return -EINVAL; 3222 return -EINVAL;
diff --git a/sound/soc/soc-devres.c b/sound/soc/soc-devres.c
index b1d732255c02..3449c1e909ae 100644
--- a/sound/soc/soc-devres.c
+++ b/sound/soc/soc-devres.c
@@ -66,7 +66,7 @@ static void devm_card_release(struct device *dev, void *res)
66 */ 66 */
67int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card) 67int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card)
68{ 68{
69 struct device **ptr; 69 struct snd_soc_card **ptr;
70 int ret; 70 int ret;
71 71
72 ptr = devres_alloc(devm_card_release, sizeof(*ptr), GFP_KERNEL); 72 ptr = devres_alloc(devm_card_release, sizeof(*ptr), GFP_KERNEL);
@@ -75,7 +75,7 @@ int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card)
75 75
76 ret = snd_soc_register_card(card); 76 ret = snd_soc_register_card(card);
77 if (ret == 0) { 77 if (ret == 0) {
78 *ptr = dev; 78 *ptr = card;
79 devres_add(dev, ptr); 79 devres_add(dev, ptr);
80 } else { 80 } else {
81 devres_free(ptr); 81 devres_free(ptr);
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 42782c01e413..11a90cd027fa 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -148,12 +148,12 @@ static void soc_pcm_apply_msb(struct snd_pcm_substream *substream,
148 } 148 }
149} 149}
150 150
151static void soc_pcm_init_runtime_hw(struct snd_pcm_hardware *hw, 151static void soc_pcm_init_runtime_hw(struct snd_pcm_runtime *runtime,
152 struct snd_soc_pcm_stream *codec_stream, 152 struct snd_soc_pcm_stream *codec_stream,
153 struct snd_soc_pcm_stream *cpu_stream) 153 struct snd_soc_pcm_stream *cpu_stream)
154{ 154{
155 hw->rate_min = max(codec_stream->rate_min, cpu_stream->rate_min); 155 struct snd_pcm_hardware *hw = &runtime->hw;
156 hw->rate_max = max(codec_stream->rate_max, cpu_stream->rate_max); 156
157 hw->channels_min = max(codec_stream->channels_min, 157 hw->channels_min = max(codec_stream->channels_min,
158 cpu_stream->channels_min); 158 cpu_stream->channels_min);
159 hw->channels_max = min(codec_stream->channels_max, 159 hw->channels_max = min(codec_stream->channels_max,
@@ -166,6 +166,13 @@ static void soc_pcm_init_runtime_hw(struct snd_pcm_hardware *hw,
166 if (cpu_stream->rates 166 if (cpu_stream->rates
167 & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS)) 167 & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))
168 hw->rates |= codec_stream->rates; 168 hw->rates |= codec_stream->rates;
169
170 snd_pcm_limit_hw_rates(runtime);
171
172 hw->rate_min = max(hw->rate_min, cpu_stream->rate_min);
173 hw->rate_min = max(hw->rate_min, codec_stream->rate_min);
174 hw->rate_max = min_not_zero(hw->rate_max, cpu_stream->rate_max);
175 hw->rate_max = min_not_zero(hw->rate_max, codec_stream->rate_max);
169} 176}
170 177
171/* 178/*
@@ -235,15 +242,14 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
235 242
236 /* Check that the codec and cpu DAIs are compatible */ 243 /* Check that the codec and cpu DAIs are compatible */
237 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 244 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
238 soc_pcm_init_runtime_hw(&runtime->hw, &codec_dai_drv->playback, 245 soc_pcm_init_runtime_hw(runtime, &codec_dai_drv->playback,
239 &cpu_dai_drv->playback); 246 &cpu_dai_drv->playback);
240 } else { 247 } else {
241 soc_pcm_init_runtime_hw(&runtime->hw, &codec_dai_drv->capture, 248 soc_pcm_init_runtime_hw(runtime, &codec_dai_drv->capture,
242 &cpu_dai_drv->capture); 249 &cpu_dai_drv->capture);
243 } 250 }
244 251
245 ret = -EINVAL; 252 ret = -EINVAL;
246 snd_pcm_limit_hw_rates(runtime);
247 if (!runtime->hw.rates) { 253 if (!runtime->hw.rates) {
248 printk(KERN_ERR "ASoC: %s <-> %s No matching rates\n", 254 printk(KERN_ERR "ASoC: %s <-> %s No matching rates\n",
249 codec_dai->name, cpu_dai->name); 255 codec_dai->name, cpu_dai->name);
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index b9ba0fcc45df..83aabea259d7 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -636,8 +636,22 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep,
636 if (usb_pipein(ep->pipe) || 636 if (usb_pipein(ep->pipe) ||
637 snd_usb_endpoint_implicit_feedback_sink(ep)) { 637 snd_usb_endpoint_implicit_feedback_sink(ep)) {
638 638
639 urb_packs = packs_per_ms;
640 /*
641 * Wireless devices can poll at a max rate of once per 4ms.
642 * For dataintervals less than 5, increase the packet count to
643 * allow the host controller to use bursting to fill in the
644 * gaps.
645 */
646 if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_WIRELESS) {
647 int interval = ep->datainterval;
648 while (interval < 5) {
649 urb_packs <<= 1;
650 ++interval;
651 }
652 }
639 /* make capture URBs <= 1 ms and smaller than a period */ 653 /* make capture URBs <= 1 ms and smaller than a period */
640 urb_packs = min(max_packs_per_urb, packs_per_ms); 654 urb_packs = min(max_packs_per_urb, urb_packs);
641 while (urb_packs > 1 && urb_packs * maxsize >= period_bytes) 655 while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
642 urb_packs >>= 1; 656 urb_packs >>= 1;
643 ep->nurbs = MAX_URBS; 657 ep->nurbs = MAX_URBS;
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 3454262358b3..f4b12c216f1c 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -1603,7 +1603,7 @@ static int snd_microii_controls_create(struct usb_mixer_interface *mixer)
1603 return err; 1603 return err;
1604 } 1604 }
1605 1605
1606 return err; 1606 return 0;
1607} 1607}
1608 1608
1609int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer) 1609int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
diff --git a/tools/power/cpupower/man/cpupower-idle-info.1 b/tools/power/cpupower/man/cpupower-idle-info.1
index 4178effd9e99..7b3646adb92f 100644
--- a/tools/power/cpupower/man/cpupower-idle-info.1
+++ b/tools/power/cpupower/man/cpupower-idle-info.1
@@ -87,4 +87,5 @@ Thomas Renninger <trenn@suse.de>
87.fi 87.fi
88.SH "SEE ALSO" 88.SH "SEE ALSO"
89.LP 89.LP
90cpupower(1), cpupower\-monitor(1), cpupower\-info(1), cpupower\-set(1) 90cpupower(1), cpupower\-monitor(1), cpupower\-info(1), cpupower\-set(1),
91cpupower\-idle\-set(1)
diff --git a/tools/power/cpupower/man/cpupower-idle-set.1 b/tools/power/cpupower/man/cpupower-idle-set.1
new file mode 100644
index 000000000000..6b1607272a5b
--- /dev/null
+++ b/tools/power/cpupower/man/cpupower-idle-set.1
@@ -0,0 +1,71 @@
1.TH "CPUPOWER-IDLE-SET" "1" "0.1" "" "cpupower Manual"
2.SH "NAME"
3.LP
4cpupower idle\-set \- Utility to set cpu idle state specific kernel options
5.SH "SYNTAX"
6.LP
7cpupower [ \-c cpulist ] idle\-info [\fIoptions\fP]
8.SH "DESCRIPTION"
9.LP
10The cpupower idle\-set subcommand allows to set cpu idle, also called cpu
11sleep state, specific options offered by the kernel. One example is disabling
12sleep states. This can be handy for power vs performance tuning.
13.SH "OPTIONS"
14.LP
15.TP
16\fB\-d\fR \fB\-\-disable\fR
17Disable a specific processor sleep state.
18.TP
19\fB\-e\fR \fB\-\-enable\fR
20Enable a specific processor sleep state.
21
22.SH "REMARKS"
23.LP
24Cpuidle Governors Policy on Disabling Sleep States
25
26.RS 4
27Depending on the used cpuidle governor, implementing the kernel policy
28how to choose sleep states, subsequent sleep states on this core, might get
29disabled as well.
30
31There are two cpuidle governors ladder and menu. While the ladder
32governor is always available, if CONFIG_CPU_IDLE is selected, the
33menu governor additionally requires CONFIG_NO_HZ.
34
35The behavior and the effect of the disable variable depends on the
36implementation of a particular governor. In the ladder governor, for
37example, it is not coherent, i.e. if one is disabling a light state,
38then all deeper states are disabled as well. Likewise, if one enables a
39deep state but a lighter state still is disabled, then this has no effect.
40.RE
41.LP
42Disabling the Lightest Sleep State may not have any Affect
43
44.RS 4
45If criteria are not met to enter deeper sleep states and the lightest sleep
46state is chosen when idle, the kernel may still enter this sleep state,
47irrespective of whether it is disabled or not. This is also reflected in
48the usage count of the disabled sleep state when using the cpupower idle-info
49command.
50.RE
51.LP
52Selecting specific CPU Cores
53
54.RS 4
55By default processor sleep states of all CPU cores are set. Please refer
56to the cpupower(1) manpage in the \-\-cpu option section how to disable
57C-states of specific cores.
58.RE
59.SH "FILES"
60.nf
61\fI/sys/devices/system/cpu/cpu*/cpuidle/state*\fP
62\fI/sys/devices/system/cpu/cpuidle/*\fP
63.fi
64.SH "AUTHORS"
65.nf
66Thomas Renninger <trenn@suse.de>
67.fi
68.SH "SEE ALSO"
69.LP
70cpupower(1), cpupower\-monitor(1), cpupower\-info(1), cpupower\-set(1),
71cpupower\-idle\-info(1)
diff --git a/tools/power/cpupower/utils/helpers/sysfs.c b/tools/power/cpupower/utils/helpers/sysfs.c
index 5cdc600e8152..851c7a16ca49 100644
--- a/tools/power/cpupower/utils/helpers/sysfs.c
+++ b/tools/power/cpupower/utils/helpers/sysfs.c
@@ -278,7 +278,7 @@ static char *sysfs_idlestate_get_one_string(unsigned int cpu,
278int sysfs_is_idlestate_disabled(unsigned int cpu, 278int sysfs_is_idlestate_disabled(unsigned int cpu,
279 unsigned int idlestate) 279 unsigned int idlestate)
280{ 280{
281 if (sysfs_get_idlestate_count(cpu) < idlestate) 281 if (sysfs_get_idlestate_count(cpu) <= idlestate)
282 return -1; 282 return -1;
283 283
284 if (!sysfs_idlestate_file_exists(cpu, idlestate, 284 if (!sysfs_idlestate_file_exists(cpu, idlestate,
@@ -303,7 +303,7 @@ int sysfs_idlestate_disable(unsigned int cpu,
303 char value[SYSFS_PATH_MAX]; 303 char value[SYSFS_PATH_MAX];
304 int bytes_written; 304 int bytes_written;
305 305
306 if (sysfs_get_idlestate_count(cpu) < idlestate) 306 if (sysfs_get_idlestate_count(cpu) <= idlestate)
307 return -1; 307 return -1;
308 308
309 if (!sysfs_idlestate_file_exists(cpu, idlestate, 309 if (!sysfs_idlestate_file_exists(cpu, idlestate,
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index fe702076ca46..9d77f13c2d25 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -2,7 +2,7 @@
2 * turbostat -- show CPU frequency and C-state residency 2 * turbostat -- show CPU frequency and C-state residency
3 * on modern Intel turbo-capable processors. 3 * on modern Intel turbo-capable processors.
4 * 4 *
5 * Copyright (c) 2012 Intel Corporation. 5 * Copyright (c) 2013 Intel Corporation.
6 * Len Brown <len.brown@intel.com> 6 * Len Brown <len.brown@intel.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
@@ -47,6 +47,8 @@ unsigned int skip_c1;
47unsigned int do_nhm_cstates; 47unsigned int do_nhm_cstates;
48unsigned int do_snb_cstates; 48unsigned int do_snb_cstates;
49unsigned int do_c8_c9_c10; 49unsigned int do_c8_c9_c10;
50unsigned int do_slm_cstates;
51unsigned int use_c1_residency_msr;
50unsigned int has_aperf; 52unsigned int has_aperf;
51unsigned int has_epb; 53unsigned int has_epb;
52unsigned int units = 1000000000; /* Ghz etc */ 54unsigned int units = 1000000000; /* Ghz etc */
@@ -81,6 +83,8 @@ double rapl_joule_counter_range;
81#define RAPL_DRAM (1 << 3) 83#define RAPL_DRAM (1 << 3)
82#define RAPL_PKG_PERF_STATUS (1 << 4) 84#define RAPL_PKG_PERF_STATUS (1 << 4)
83#define RAPL_DRAM_PERF_STATUS (1 << 5) 85#define RAPL_DRAM_PERF_STATUS (1 << 5)
86#define RAPL_PKG_POWER_INFO (1 << 6)
87#define RAPL_CORE_POLICY (1 << 7)
84#define TJMAX_DEFAULT 100 88#define TJMAX_DEFAULT 100
85 89
86#define MAX(a, b) ((a) > (b) ? (a) : (b)) 90#define MAX(a, b) ((a) > (b) ? (a) : (b))
@@ -96,7 +100,7 @@ struct thread_data {
96 unsigned long long tsc; 100 unsigned long long tsc;
97 unsigned long long aperf; 101 unsigned long long aperf;
98 unsigned long long mperf; 102 unsigned long long mperf;
99 unsigned long long c1; /* derived */ 103 unsigned long long c1;
100 unsigned long long extra_msr64; 104 unsigned long long extra_msr64;
101 unsigned long long extra_delta64; 105 unsigned long long extra_delta64;
102 unsigned long long extra_msr32; 106 unsigned long long extra_msr32;
@@ -266,7 +270,7 @@ void print_header(void)
266 outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset64); 270 outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset64);
267 if (do_nhm_cstates) 271 if (do_nhm_cstates)
268 outp += sprintf(outp, " %%c1"); 272 outp += sprintf(outp, " %%c1");
269 if (do_nhm_cstates) 273 if (do_nhm_cstates && !do_slm_cstates)
270 outp += sprintf(outp, " %%c3"); 274 outp += sprintf(outp, " %%c3");
271 if (do_nhm_cstates) 275 if (do_nhm_cstates)
272 outp += sprintf(outp, " %%c6"); 276 outp += sprintf(outp, " %%c6");
@@ -280,9 +284,9 @@ void print_header(void)
280 284
281 if (do_snb_cstates) 285 if (do_snb_cstates)
282 outp += sprintf(outp, " %%pc2"); 286 outp += sprintf(outp, " %%pc2");
283 if (do_nhm_cstates) 287 if (do_nhm_cstates && !do_slm_cstates)
284 outp += sprintf(outp, " %%pc3"); 288 outp += sprintf(outp, " %%pc3");
285 if (do_nhm_cstates) 289 if (do_nhm_cstates && !do_slm_cstates)
286 outp += sprintf(outp, " %%pc6"); 290 outp += sprintf(outp, " %%pc6");
287 if (do_snb_cstates) 291 if (do_snb_cstates)
288 outp += sprintf(outp, " %%pc7"); 292 outp += sprintf(outp, " %%pc7");
@@ -480,7 +484,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
480 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 484 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
481 goto done; 485 goto done;
482 486
483 if (do_nhm_cstates) 487 if (do_nhm_cstates && !do_slm_cstates)
484 outp += sprintf(outp, " %6.2f", 100.0 * c->c3/t->tsc); 488 outp += sprintf(outp, " %6.2f", 100.0 * c->c3/t->tsc);
485 if (do_nhm_cstates) 489 if (do_nhm_cstates)
486 outp += sprintf(outp, " %6.2f", 100.0 * c->c6/t->tsc); 490 outp += sprintf(outp, " %6.2f", 100.0 * c->c6/t->tsc);
@@ -499,9 +503,9 @@ int format_counters(struct thread_data *t, struct core_data *c,
499 503
500 if (do_snb_cstates) 504 if (do_snb_cstates)
501 outp += sprintf(outp, " %6.2f", 100.0 * p->pc2/t->tsc); 505 outp += sprintf(outp, " %6.2f", 100.0 * p->pc2/t->tsc);
502 if (do_nhm_cstates) 506 if (do_nhm_cstates && !do_slm_cstates)
503 outp += sprintf(outp, " %6.2f", 100.0 * p->pc3/t->tsc); 507 outp += sprintf(outp, " %6.2f", 100.0 * p->pc3/t->tsc);
504 if (do_nhm_cstates) 508 if (do_nhm_cstates && !do_slm_cstates)
505 outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc); 509 outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc);
506 if (do_snb_cstates) 510 if (do_snb_cstates)
507 outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc); 511 outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc);
@@ -648,17 +652,24 @@ delta_thread(struct thread_data *new, struct thread_data *old,
648 } 652 }
649 653
650 654
651 /* 655 if (use_c1_residency_msr) {
652 * As counter collection is not atomic, 656 /*
653 * it is possible for mperf's non-halted cycles + idle states 657 * Some models have a dedicated C1 residency MSR,
654 * to exceed TSC's all cycles: show c1 = 0% in that case. 658 * which should be more accurate than the derivation below.
655 */ 659 */
656 if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc) 660 } else {
657 old->c1 = 0; 661 /*
658 else { 662 * As counter collection is not atomic,
659 /* normal case, derive c1 */ 663 * it is possible for mperf's non-halted cycles + idle states
660 old->c1 = old->tsc - old->mperf - core_delta->c3 664 * to exceed TSC's all cycles: show c1 = 0% in that case.
665 */
666 if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
667 old->c1 = 0;
668 else {
669 /* normal case, derive c1 */
670 old->c1 = old->tsc - old->mperf - core_delta->c3
661 - core_delta->c6 - core_delta->c7; 671 - core_delta->c6 - core_delta->c7;
672 }
662 } 673 }
663 674
664 if (old->mperf == 0) { 675 if (old->mperf == 0) {
@@ -872,13 +883,21 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
872 if (get_msr(cpu, extra_msr_offset64, &t->extra_msr64)) 883 if (get_msr(cpu, extra_msr_offset64, &t->extra_msr64))
873 return -5; 884 return -5;
874 885
886 if (use_c1_residency_msr) {
887 if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1))
888 return -6;
889 }
890
875 /* collect core counters only for 1st thread in core */ 891 /* collect core counters only for 1st thread in core */
876 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 892 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
877 return 0; 893 return 0;
878 894
879 if (do_nhm_cstates) { 895 if (do_nhm_cstates && !do_slm_cstates) {
880 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) 896 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
881 return -6; 897 return -6;
898 }
899
900 if (do_nhm_cstates) {
882 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) 901 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
883 return -7; 902 return -7;
884 } 903 }
@@ -898,7 +917,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
898 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 917 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
899 return 0; 918 return 0;
900 919
901 if (do_nhm_cstates) { 920 if (do_nhm_cstates && !do_slm_cstates) {
902 if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3)) 921 if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
903 return -9; 922 return -9;
904 if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6)) 923 if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
@@ -977,7 +996,7 @@ void print_verbose_header(void)
977 ratio, bclk, ratio * bclk); 996 ratio, bclk, ratio * bclk);
978 997
979 get_msr(0, MSR_IA32_POWER_CTL, &msr); 998 get_msr(0, MSR_IA32_POWER_CTL, &msr);
980 fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E: %sabled)\n", 999 fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
981 msr, msr & 0x2 ? "EN" : "DIS"); 1000 msr, msr & 0x2 ? "EN" : "DIS");
982 1001
983 if (!do_ivt_turbo_ratio_limit) 1002 if (!do_ivt_turbo_ratio_limit)
@@ -1046,25 +1065,28 @@ print_nhm_turbo_ratio_limits:
1046 1065
1047 switch(msr & 0x7) { 1066 switch(msr & 0x7) {
1048 case 0: 1067 case 0:
1049 fprintf(stderr, "pc0"); 1068 fprintf(stderr, do_slm_cstates ? "no pkg states" : "pc0");
1050 break; 1069 break;
1051 case 1: 1070 case 1:
1052 fprintf(stderr, do_snb_cstates ? "pc2" : "pc0"); 1071 fprintf(stderr, do_slm_cstates ? "no pkg states" : do_snb_cstates ? "pc2" : "pc0");
1053 break; 1072 break;
1054 case 2: 1073 case 2:
1055 fprintf(stderr, do_snb_cstates ? "pc6-noret" : "pc3"); 1074 fprintf(stderr, do_slm_cstates ? "invalid" : do_snb_cstates ? "pc6-noret" : "pc3");
1056 break; 1075 break;
1057 case 3: 1076 case 3:
1058 fprintf(stderr, "pc6"); 1077 fprintf(stderr, do_slm_cstates ? "invalid" : "pc6");
1059 break; 1078 break;
1060 case 4: 1079 case 4:
1061 fprintf(stderr, "pc7"); 1080 fprintf(stderr, do_slm_cstates ? "pc4" : "pc7");
1062 break; 1081 break;
1063 case 5: 1082 case 5:
1064 fprintf(stderr, do_snb_cstates ? "pc7s" : "invalid"); 1083 fprintf(stderr, do_slm_cstates ? "invalid" : do_snb_cstates ? "pc7s" : "invalid");
1084 break;
1085 case 6:
1086 fprintf(stderr, do_slm_cstates ? "pc6" : "invalid");
1065 break; 1087 break;
1066 case 7: 1088 case 7:
1067 fprintf(stderr, "unlimited"); 1089 fprintf(stderr, do_slm_cstates ? "pc7" : "unlimited");
1068 break; 1090 break;
1069 default: 1091 default:
1070 fprintf(stderr, "invalid"); 1092 fprintf(stderr, "invalid");
@@ -1460,6 +1482,8 @@ int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
1460 case 0x3F: /* HSW */ 1482 case 0x3F: /* HSW */
1461 case 0x45: /* HSW */ 1483 case 0x45: /* HSW */
1462 case 0x46: /* HSW */ 1484 case 0x46: /* HSW */
1485 case 0x37: /* BYT */
1486 case 0x4D: /* AVN */
1463 return 1; 1487 return 1;
1464 case 0x2E: /* Nehalem-EX Xeon - Beckton */ 1488 case 0x2E: /* Nehalem-EX Xeon - Beckton */
1465 case 0x2F: /* Westmere-EX Xeon - Eagleton */ 1489 case 0x2F: /* Westmere-EX Xeon - Eagleton */
@@ -1532,14 +1556,33 @@ int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1532#define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */ 1556#define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */
1533#define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */ 1557#define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */
1534 1558
1559double get_tdp(model)
1560{
1561 unsigned long long msr;
1562
1563 if (do_rapl & RAPL_PKG_POWER_INFO)
1564 if (!get_msr(0, MSR_PKG_POWER_INFO, &msr))
1565 return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
1566
1567 switch (model) {
1568 case 0x37:
1569 case 0x4D:
1570 return 30.0;
1571 default:
1572 return 135.0;
1573 }
1574}
1575
1576
1535/* 1577/*
1536 * rapl_probe() 1578 * rapl_probe()
1537 * 1579 *
1538 * sets do_rapl 1580 * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
1539 */ 1581 */
1540void rapl_probe(unsigned int family, unsigned int model) 1582void rapl_probe(unsigned int family, unsigned int model)
1541{ 1583{
1542 unsigned long long msr; 1584 unsigned long long msr;
1585 unsigned int time_unit;
1543 double tdp; 1586 double tdp;
1544 1587
1545 if (!genuine_intel) 1588 if (!genuine_intel)
@@ -1555,11 +1598,15 @@ void rapl_probe(unsigned int family, unsigned int model)
1555 case 0x3F: /* HSW */ 1598 case 0x3F: /* HSW */
1556 case 0x45: /* HSW */ 1599 case 0x45: /* HSW */
1557 case 0x46: /* HSW */ 1600 case 0x46: /* HSW */
1558 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_GFX; 1601 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
1559 break; 1602 break;
1560 case 0x2D: 1603 case 0x2D:
1561 case 0x3E: 1604 case 0x3E:
1562 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS; 1605 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
1606 break;
1607 case 0x37: /* BYT */
1608 case 0x4D: /* AVN */
1609 do_rapl = RAPL_PKG | RAPL_CORES ;
1563 break; 1610 break;
1564 default: 1611 default:
1565 return; 1612 return;
@@ -1570,19 +1617,22 @@ void rapl_probe(unsigned int family, unsigned int model)
1570 return; 1617 return;
1571 1618
1572 rapl_power_units = 1.0 / (1 << (msr & 0xF)); 1619 rapl_power_units = 1.0 / (1 << (msr & 0xF));
1573 rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F)); 1620 if (model == 0x37)
1574 rapl_time_units = 1.0 / (1 << (msr >> 16 & 0xF)); 1621 rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
1622 else
1623 rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
1575 1624
1576 /* get TDP to determine energy counter range */ 1625 time_unit = msr >> 16 & 0xF;
1577 if (get_msr(0, MSR_PKG_POWER_INFO, &msr)) 1626 if (time_unit == 0)
1578 return; 1627 time_unit = 0xA;
1579 1628
1580 tdp = ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units; 1629 rapl_time_units = 1.0 / (1 << (time_unit));
1581 1630
1582 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; 1631 tdp = get_tdp(model);
1583 1632
1633 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
1584 if (verbose) 1634 if (verbose)
1585 fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range\n", rapl_joule_counter_range); 1635 fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
1586 1636
1587 return; 1637 return;
1588} 1638}
@@ -1668,7 +1718,6 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1668{ 1718{
1669 unsigned long long msr; 1719 unsigned long long msr;
1670 int cpu; 1720 int cpu;
1671 double local_rapl_power_units, local_rapl_energy_units, local_rapl_time_units;
1672 1721
1673 if (!do_rapl) 1722 if (!do_rapl)
1674 return 0; 1723 return 0;
@@ -1686,23 +1735,13 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1686 if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr)) 1735 if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
1687 return -1; 1736 return -1;
1688 1737
1689 local_rapl_power_units = 1.0 / (1 << (msr & 0xF));
1690 local_rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
1691 local_rapl_time_units = 1.0 / (1 << (msr >> 16 & 0xF));
1692
1693 if (local_rapl_power_units != rapl_power_units)
1694 fprintf(stderr, "cpu%d, ERROR: Power units mis-match\n", cpu);
1695 if (local_rapl_energy_units != rapl_energy_units)
1696 fprintf(stderr, "cpu%d, ERROR: Energy units mis-match\n", cpu);
1697 if (local_rapl_time_units != rapl_time_units)
1698 fprintf(stderr, "cpu%d, ERROR: Time units mis-match\n", cpu);
1699
1700 if (verbose) { 1738 if (verbose) {
1701 fprintf(stderr, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx " 1739 fprintf(stderr, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx "
1702 "(%f Watts, %f Joules, %f sec.)\n", cpu, msr, 1740 "(%f Watts, %f Joules, %f sec.)\n", cpu, msr,
1703 local_rapl_power_units, local_rapl_energy_units, local_rapl_time_units); 1741 rapl_power_units, rapl_energy_units, rapl_time_units);
1704 } 1742 }
1705 if (do_rapl & RAPL_PKG) { 1743 if (do_rapl & RAPL_PKG_POWER_INFO) {
1744
1706 if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr)) 1745 if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
1707 return -5; 1746 return -5;
1708 1747
@@ -1714,6 +1753,9 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1714 ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units, 1753 ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
1715 ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units); 1754 ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
1716 1755
1756 }
1757 if (do_rapl & RAPL_PKG) {
1758
1717 if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr)) 1759 if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
1718 return -9; 1760 return -9;
1719 1761
@@ -1749,12 +1791,16 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1749 1791
1750 print_power_limit_msr(cpu, msr, "DRAM Limit"); 1792 print_power_limit_msr(cpu, msr, "DRAM Limit");
1751 } 1793 }
1752 if (do_rapl & RAPL_CORES) { 1794 if (do_rapl & RAPL_CORE_POLICY) {
1753 if (verbose) { 1795 if (verbose) {
1754 if (get_msr(cpu, MSR_PP0_POLICY, &msr)) 1796 if (get_msr(cpu, MSR_PP0_POLICY, &msr))
1755 return -7; 1797 return -7;
1756 1798
1757 fprintf(stderr, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF); 1799 fprintf(stderr, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);
1800 }
1801 }
1802 if (do_rapl & RAPL_CORES) {
1803 if (verbose) {
1758 1804
1759 if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr)) 1805 if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
1760 return -9; 1806 return -9;
@@ -1813,10 +1859,48 @@ int has_c8_c9_c10(unsigned int family, unsigned int model)
1813} 1859}
1814 1860
1815 1861
1862int is_slm(unsigned int family, unsigned int model)
1863{
1864 if (!genuine_intel)
1865 return 0;
1866 switch (model) {
1867 case 0x37: /* BYT */
1868 case 0x4D: /* AVN */
1869 return 1;
1870 }
1871 return 0;
1872}
1873
1874#define SLM_BCLK_FREQS 5
1875double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
1876
1877double slm_bclk(void)
1878{
1879 unsigned long long msr = 3;
1880 unsigned int i;
1881 double freq;
1882
1883 if (get_msr(0, MSR_FSB_FREQ, &msr))
1884 fprintf(stderr, "SLM BCLK: unknown\n");
1885
1886 i = msr & 0xf;
1887 if (i >= SLM_BCLK_FREQS) {
1888 fprintf(stderr, "SLM BCLK[%d] invalid\n", i);
1889 msr = 3;
1890 }
1891 freq = slm_freq_table[i];
1892
1893 fprintf(stderr, "SLM BCLK: %.1f Mhz\n", freq);
1894
1895 return freq;
1896}
1897
1816double discover_bclk(unsigned int family, unsigned int model) 1898double discover_bclk(unsigned int family, unsigned int model)
1817{ 1899{
1818 if (is_snb(family, model)) 1900 if (is_snb(family, model))
1819 return 100.00; 1901 return 100.00;
1902 else if (is_slm(family, model))
1903 return slm_bclk();
1820 else 1904 else
1821 return 133.33; 1905 return 133.33;
1822} 1906}
@@ -1873,7 +1957,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
1873 fprintf(stderr, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n", 1957 fprintf(stderr, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
1874 cpu, msr, target_c_local); 1958 cpu, msr, target_c_local);
1875 1959
1876 if (target_c_local < 85 || target_c_local > 120) 1960 if (target_c_local < 85 || target_c_local > 127)
1877 goto guess; 1961 goto guess;
1878 1962
1879 tcc_activation_temp = target_c_local; 1963 tcc_activation_temp = target_c_local;
@@ -1970,6 +2054,7 @@ void check_cpuid()
1970 do_smi = do_nhm_cstates; 2054 do_smi = do_nhm_cstates;
1971 do_snb_cstates = is_snb(family, model); 2055 do_snb_cstates = is_snb(family, model);
1972 do_c8_c9_c10 = has_c8_c9_c10(family, model); 2056 do_c8_c9_c10 = has_c8_c9_c10(family, model);
2057 do_slm_cstates = is_slm(family, model);
1973 bclk = discover_bclk(family, model); 2058 bclk = discover_bclk(family, model);
1974 2059
1975 do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model); 2060 do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
@@ -2331,7 +2416,7 @@ int main(int argc, char **argv)
2331 cmdline(argc, argv); 2416 cmdline(argc, argv);
2332 2417
2333 if (verbose) 2418 if (verbose)
2334 fprintf(stderr, "turbostat v3.4 April 17, 2013" 2419 fprintf(stderr, "turbostat v3.5 April 26, 2013"
2335 " - Len Brown <lenb@kernel.org>\n"); 2420 " - Len Brown <lenb@kernel.org>\n");
2336 2421
2337 turbostat_init(); 2422 turbostat_init();
diff --git a/tools/usb/Makefile b/tools/usb/Makefile
index 396d6c44e9d7..acf2165c04e6 100644
--- a/tools/usb/Makefile
+++ b/tools/usb/Makefile
@@ -3,11 +3,12 @@
3CC = $(CROSS_COMPILE)gcc 3CC = $(CROSS_COMPILE)gcc
4PTHREAD_LIBS = -lpthread 4PTHREAD_LIBS = -lpthread
5WARNINGS = -Wall -Wextra 5WARNINGS = -Wall -Wextra
6CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) -I../include 6CFLAGS = $(WARNINGS) -g -I../include
7LDFLAGS = $(PTHREAD_LIBS)
7 8
8all: testusb ffs-test 9all: testusb ffs-test
9%: %.c 10%: %.c
10 $(CC) $(CFLAGS) -o $@ $^ 11 $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS)
11 12
12clean: 13clean:
13 $(RM) testusb ffs-test 14 $(RM) testusb ffs-test
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 662f34c3287e..4f588bc94186 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1615,8 +1615,9 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
1615 1615
1616int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) 1616int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
1617{ 1617{
1618 return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page, 1618 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
1619 offset, len); 1619
1620 return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
1620} 1621}
1621EXPORT_SYMBOL_GPL(kvm_clear_guest_page); 1622EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
1622 1623
@@ -1897,6 +1898,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
1897 int r; 1898 int r;
1898 struct kvm_vcpu *vcpu, *v; 1899 struct kvm_vcpu *vcpu, *v;
1899 1900
1901 if (id >= KVM_MAX_VCPUS)
1902 return -EINVAL;
1903
1900 vcpu = kvm_arch_vcpu_create(kvm, id); 1904 vcpu = kvm_arch_vcpu_create(kvm, id);
1901 if (IS_ERR(vcpu)) 1905 if (IS_ERR(vcpu))
1902 return PTR_ERR(vcpu); 1906 return PTR_ERR(vcpu);